diff --git a/go.mod b/go.mod
index 6fd5b2fbd..70348c7d7 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module k8s.io/node-problem-detector
go 1.23.1

require (
- cloud.google.com/go/compute/metadata v0.5.2
+ cloud.google.com/go/compute/metadata v0.6.0
contrib.go.opencensus.io/exporter/prometheus v0.4.2
contrib.go.opencensus.io/exporter/stackdriver v0.13.14
github.com/acobaugh/osrelease v0.1.0
@@ -11,31 +11,31 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0
github.com/euank/go-kmsg-parser v2.0.0+incompatible
github.com/hpcloud/tail v1.0.0
- github.com/prometheus/client_model v0.6.1
- github.com/prometheus/common v0.55.0
- github.com/prometheus/procfs v0.15.1
+ github.com/prometheus/client_model v0.6.2
+ github.com/prometheus/common v0.63.0
+ github.com/prometheus/procfs v0.16.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0
go.opencensus.io v0.24.0
- golang.org/x/sys v0.28.0
- google.golang.org/api v0.192.0
- k8s.io/api v0.31.7
- k8s.io/apimachinery v0.31.7
- k8s.io/client-go v0.31.7
+ golang.org/x/sys v0.32.0
+ google.golang.org/api v0.228.0
+ k8s.io/api v0.32.3
+ k8s.io/apimachinery v0.32.3
+ k8s.io/client-go v0.32.3
k8s.io/klog/v2 v2.130.1
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
)

require (
- cloud.google.com/go/auth v0.8.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
+ cloud.google.com/go/auth v0.15.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/monitoring v1.20.3 // indirect
cloud.google.com/go/trace v1.10.11 // indirect
github.com/aws/aws-sdk-go v1.44.72 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -46,31 +46,32 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect
- github.com/google/s2a-go v0.1.8 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.13.0 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
- github.com/prometheus/client_golang v1.19.1 // indirect
+ github.com/prometheus/client_golang v1.20.4 // indirect
github.com/prometheus/prometheus v0.35.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
@@ -78,30 +79,32 @@ require (
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
- golang.org/x/crypto v0.31.0 // indirect
- golang.org/x/net v0.33.0 // indirect
- golang.org/x/oauth2 v0.22.0 // indirect
- golang.org/x/sync v0.10.0 // indirect
- golang.org/x/term v0.27.0 // indirect
- golang.org/x/text v0.21.0 // indirect
- golang.org/x/time v0.6.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
+ go.opentelemetry.io/otel v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.34.0 // indirect
+ golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/net v0.37.0 // indirect
+ golang.org/x/oauth2 v0.28.0 // indirect
+ golang.org/x/sync v0.12.0 // indirect
+ golang.org/x/term v0.30.0 // indirect
+ golang.org/x/text v0.23.0 // indirect
+ golang.org/x/time v0.11.0 // indirect
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
- google.golang.org/grpc v1.64.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
+ google.golang.org/grpc v1.71.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
+ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/go.sum b/go.sum
index 2b23a61de..e125cac28 100644
--- a/go.sum
+++ b/go.sum
@@ -29,10 +29,10 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
-cloud.google.com/go/auth v0.8.1 h1:QZW9FjC5lZzN864p13YxvAtGUlQ+KgRL+8Sg45Z6vxo=
-cloud.google.com/go/auth v0.8.1/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc=
-cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
-cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -42,8 +42,8 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
-cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
-cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@@ -179,8 +179,8 @@ github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
@@ -449,8 +449,9 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
@@ -473,8 +474,8 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
@@ -591,8 +592,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -619,26 +620,26 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20220318212150-b2ab0324ddda/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
-github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
-github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
-github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
@@ -711,7 +712,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
@@ -752,6 +752,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -768,6 +770,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/linode/linodego v1.4.0/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8=
@@ -875,8 +878,8 @@ github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1ls
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -886,8 +889,8 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -927,6 +930,7 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -950,15 +954,15 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -973,8 +977,8 @@ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
@@ -991,8 +995,8 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/prometheus/prometheus v0.35.0 h1:N93oX6BrJ2iP3UuE2Uz4Lt+5BkUpaFer3L9CbADzesc=
github.com/prometheus/prometheus v0.35.0/go.mod h1:7HaLx5kEPKJ0GDgbODG0fZgXbQ8K/XjZNJXQmbmgQlY=
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
@@ -1003,8 +1007,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -1161,21 +1165,23 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ=
go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.1/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40=
@@ -1187,20 +1193,24 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.1/go.mod h1:DAKwdo06hFLc0U88O10x4xnb5sc7dDRDqRuiN+io8JE=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE=
go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.opentelemetry.io/proto/otlp v0.12.1/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
@@ -1242,8 +1252,8 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1349,8 +1359,8 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1369,8 +1379,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
+golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1384,8 +1394,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1515,15 +1525,15 @@ golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1533,8 +1543,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1544,8 +1554,8 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1620,8 +1630,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1662,8 +1672,8 @@ google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQ
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
-google.golang.org/api v0.192.0 h1:PljqpNAfZaaSpS+TnANfnNAXKdzHM/B9bKhwRlo7JP0=
-google.golang.org/api v0.192.0/go.mod h1:9VcphjvAxPKLmSxVSzPlSRXy/5ARMEw5bf58WoVXafQ=
+google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs=
+google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1747,10 +1757,10 @@ google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY=
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M=
-google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
-google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1785,8 +1795,8 @@ google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
-google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
+google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
+google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1803,8 +1813,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1816,6 +1826,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
@@ -1861,16 +1873,16 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
-k8s.io/api v0.31.7 h1:wSo59nXpVXmaB6hgNVJCrdnKtyYoutIgpNNBbROBd2U=
-k8s.io/api v0.31.7/go.mod h1:vLUha4nXRUGtQdayzsmjur0lQApK/sJSxyR/fwuujcU=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
-k8s.io/apimachinery v0.31.7 h1:fpV8yLerIZFAkj0of66+i1ArPv/Btf9KO6Aulng7RRw=
-k8s.io/apimachinery v0.31.7/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -1880,8 +1892,8 @@ k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.31.7 h1:2+LFJc6Xw6rhmpDbN1NSmhoFLWBh62cPG/P+IfaTSGY=
-k8s.io/client-go v0.31.7/go.mod h1:hrrMorBQ17LqzoKIxKg5cSWvmWl94EwA/MUF0Mkf+Zw=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
@@ -1910,16 +1922,16 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@@ -1927,15 +1939,15 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/test/go.mod b/test/go.mod
index 2f1a9a867..1908d1d26 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -7,63 +7,62 @@ replace k8s.io/node-problem-detector => ../.
require (
github.com/avast/retry-go/v4 v4.6.1
github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.31.1
+ github.com/onsi/gomega v1.37.0
github.com/pborman/uuid v1.2.1
github.com/spf13/pflag v1.0.6
- golang.org/x/crypto v0.31.0
- golang.org/x/oauth2 v0.22.0
- google.golang.org/api v0.192.0
- k8s.io/apimachinery v0.31.7
- k8s.io/component-base v0.29.15
+ golang.org/x/crypto v0.37.0
+ golang.org/x/oauth2 v0.29.0
+ google.golang.org/api v0.228.0
+ k8s.io/apimachinery v0.32.3
+ k8s.io/component-base v0.32.3
k8s.io/klog/v2 v2.130.1
- k8s.io/node-problem-detector v0.8.19
+ k8s.io/node-problem-detector v0.8.20
sigs.k8s.io/boskos v0.0.0-20200515170311-7d36bde8cdf6
)

require (
- cloud.google.com/go/auth v0.8.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
- cloud.google.com/go/compute/metadata v0.5.2 // indirect
+ cloud.google.com/go/auth v0.15.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+ cloud.google.com/go/compute/metadata v0.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/s2a-go v0.1.8 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.0.0 // indirect
- github.com/kr/text v0.2.0 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nxadm/tail v1.4.8 // indirect
- github.com/prometheus/client_golang v1.19.1 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_golang v1.20.4 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.63.0 // indirect
+ github.com/prometheus/procfs v0.16.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
- golang.org/x/net v0.33.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
- golang.org/x/text v0.21.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
- google.golang.org/grpc v1.64.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
+ go.opentelemetry.io/otel v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.34.0 // indirect
+ golang.org/x/net v0.37.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
+ golang.org/x/tools v0.30.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
+ google.golang.org/grpc v1.71.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783 // indirect
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
+ k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/test/go.sum b/test/go.sum
index de500fe03..e54005025 100644
--- a/test/go.sum
+++ b/test/go.sum
@@ -12,13 +12,13 @@ cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.47.0/go.mod h1:5p3Ky/7f3N10VBkhuR5LFtddroTiMyjZV/Kj5qOQFxU=
-cloud.google.com/go/auth v0.8.1 h1:QZW9FjC5lZzN864p13YxvAtGUlQ+KgRL+8Sg45Z6vxo=
-cloud.google.com/go/auth v0.8.1/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc=
-cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
-cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
-cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/logging v1.0.0/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -143,8 +143,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
github.com/clarketm/json v1.13.4/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo=
@@ -178,7 +178,6 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -379,8 +378,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.0.0-20200115214256-379933c9c22b/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-licenses v0.0.0-20191112164736-212ea350c932/go.mod h1:16wa6pRqNDUIhOtwF0GcROVqMeXHZJ7H6eGDFUh5Pfk=
@@ -396,11 +395,11 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.m
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
-github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
-github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -408,13 +407,13 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
-github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
@@ -493,6 +492,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -508,6 +509,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
@@ -581,8 +584,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
-github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
+github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -590,8 +593,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
-github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
+github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -636,16 +639,16 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -655,8 +658,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -667,8 +670,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190706150252-9beb055b7962/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -676,8 +679,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uY
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -787,14 +790,20 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -831,8 +840,8 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -894,15 +903,15 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -910,8 +919,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -956,17 +965,17 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1011,8 +1020,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200214144324-88be01311a71/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200303214625-2b0b585e22fe/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1033,8 +1042,8 @@ google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.192.0 h1:PljqpNAfZaaSpS+TnANfnNAXKdzHM/B9bKhwRlo7JP0=
-google.golang.org/api v0.192.0/go.mod h1:9VcphjvAxPKLmSxVSzPlSRXy/5ARMEw5bf58WoVXafQ=
+google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs=
+google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1058,10 +1067,10 @@ google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBr
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
-google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1078,8 +1087,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
-google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
+google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
+google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1089,8 +1098,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1132,8 +1141,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
@@ -1155,8 +1162,8 @@ k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76/go.mod h1:M2fZgZL9DbLfeJa
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
-k8s.io/apimachinery v0.31.7 h1:fpV8yLerIZFAkj0of66+i1ArPv/Btf9KO6Aulng7RRw=
-k8s.io/apimachinery v0.31.7/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
@@ -1171,8 +1178,8 @@ k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+
k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ=
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
-k8s.io/component-base v0.29.15 h1:CvmXXTDyk43FDaiJ/Rp+yWFjw6hkUI2t7mIJUrK5j00=
-k8s.io/component-base v0.29.15/go.mod h1:jH/sbuvmXew2Fz2iIKNMeNw8o/d1KR9tAg6uekQKnVk=
+k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
+k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
k8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1192,8 +1199,8 @@ k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783 h1:dLB5TiQVoLcFkj1TPQSahZiJFP8NL+63tawMlX5aV8w=
k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783/go.mod h1:bW6thaPZfL2hW7ecjx2WYwlP9KQLM47/xIJyttkVk5s=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
-k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
knative.dev/eventing-contrib v0.11.2/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
knative.dev/pkg v0.0.0-20200207155214-fef852970f43/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 4d80cb219..500c34cf4 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,174 @@
# Changelog
+## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19)
+
+
+### Features
+
+* **auth:** Add hard-bound token request to compute token provider. ([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699))
+
+## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24)
+
+
+### Documentation
+
+* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941))
+
+## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08)
+
+
+### Features
+
+* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379)
+* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13)
+
+
+### Features
+
+* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d))
+* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f))
+
+
+### Bug Fixes
+
+* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90))
+
+## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10)
+
+
+### Bug Fixes
+
+* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1))
+
+## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04)
+
+
+### Features
+
+* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005)
+
+
+### Bug Fixes
+
+* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188)
+
+## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21)
+
+
+### Features
+
+* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344))
+
+## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12)
+
+
+### Bug Fixes
+
+* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556)
+
+## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06)
+
+
+### Bug Fixes
+
+* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2))
+* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6))
+
+## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30)
+
+
+### Features
+
+* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b))
+
+## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22)
+
+
+### Bug Fixes
+
+* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844)
+* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114))
+
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
+## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
+
+
+### Features
+
+* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45))
+
## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13)
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
index 36de276a0..6fe4f0763 100644
--- a/vendor/cloud.google.com/go/auth/README.md
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -1,4 +1,40 @@
-# auth
+# Google Auth Library for Go
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
+[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+ [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+ package.
+- To create an authenticated HTTP client please see examples in the
+ [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+ package.
+- To create an authenticated gRPC connection please see examples in the
+ [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+ package.
+- To create an ID token please see examples in the
+ [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+ package.
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
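
The README above points consumers at the credentials and httptransport packages rather than showing code. As a hedged illustration of that flow (not part of this change set; the scope and request URL below are placeholder assumptions), detecting Application Default Credentials and building an authenticated HTTP client with this module looks roughly like:

```go
// Sketch only: uses credentials.DetectDefault and httptransport.NewClient as
// the README describes; the scope and target URL are illustrative.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	// Detect Application Default Credentials from the environment.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatalf("detect credentials: %v", err)
	}

	// Build an *http.Client that attaches tokens from those credentials.
	client, err := httptransport.NewClient(&httptransport.Options{
		Credentials: creds,
	})
	if err != nil {
		log.Fatalf("new client: %v", err)
	}

	// Placeholder request; any API endpoint the credentials can reach works here.
	resp, err := client.Get("https://cloudresourcemanager.googleapis.com/v1/projects")
	if err != nil {
		log.Fatalf("request: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```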
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index 41e03f293..cd5e98868 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
package auth
import (
@@ -19,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"strings"
@@ -27,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -130,7 +137,9 @@ func (t *Token) isEmpty() bool {
}
// Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
type Credentials struct {
json []byte
projectID CredentialsPropertyProvider
@@ -220,9 +229,7 @@ type CredentialsOptions struct {
UniverseDomainProvider CredentialsPropertyProvider
}
-// NewCredentials returns new [Credentials] from the provided options. Most users
-// will want to build this object a function from the
-// [cloud.google.com/go/auth/credentials] package.
+// NewCredentials returns new [Credentials] from the provided options.
func NewCredentials(opts *CredentialsOptions) *Credentials {
creds := &Credentials{
TokenProvider: opts.TokenProvider,
@@ -235,8 +242,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials {
return creds
}
-// CachedTokenProviderOptions provided options for configuring a
-// CachedTokenProvider.
+// CachedTokenProviderOptions provides options for configuring a cached
+// [TokenProvider].
type CachedTokenProviderOptions struct {
// DisableAutoRefresh makes the TokenProvider always return the same token,
// even if it is expired. The default is false. Optional.
@@ -246,7 +253,7 @@ type CachedTokenProviderOptions struct {
// seconds. Optional.
ExpireEarly time.Duration
// DisableAsyncRefresh configures a synchronous workflow that refreshes
- // stale tokens while blocking. The default is false. Optional.
+ // tokens in a blocking manner. The default is false. Optional.
DisableAsyncRefresh bool
}
@@ -258,7 +265,7 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
}
func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
- if ctpo == nil {
+ if ctpo == nil || ctpo.ExpireEarly == 0 {
return defaultExpiryDelta
}
return ctpo.ExpireEarly
@@ -273,12 +280,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
// by the underlying provider. By default it will refresh tokens asynchronously
-// (non-blocking mode) within a window that starts 3 minutes and 45 seconds
-// before they expire. The asynchronous (non-blocking) refresh can be changed to
-// a synchronous (blocking) refresh using the
-// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry
-// duration can be configured using the CachedTokenProviderOptions.ExpireEarly
-// option.
+// a few minutes before they expire.
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
if ctp, ok := tp.(*cachedTokenProvider); ok {
return ctp
@@ -321,7 +323,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err
defer c.mu.Unlock()
return c.cachedToken, nil
case stale:
- c.tokenAsync(ctx)
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
// Return the stale token immediately to not block customer requests to Cloud services.
c.mu.Lock()
defer c.mu.Unlock()
@@ -336,13 +340,14 @@ func (c *cachedTokenProvider) tokenState() tokenState {
c.mu.Lock()
defer c.mu.Unlock()
t := c.cachedToken
+ now := timeNow()
if t == nil || t.Value == "" {
return invalid
} else if t.Expiry.IsZero() {
return fresh
- } else if timeNow().After(t.Expiry.Round(0)) {
+ } else if now.After(t.Expiry.Round(0)) {
return invalid
- } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
return stale
}
return fresh
@@ -487,6 +492,11 @@ type Options2LO struct {
// UseIDToken requests that the token returned be an ID token if one is
// returned from the server. Optional.
UseIDToken bool
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options2LO) client() *http.Client {
@@ -517,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
- return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
+ return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil
}
type tokenProvider2LO struct {
opts *Options2LO
Client *http.Client
+ logger *slog.Logger
}
func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
@@ -557,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(tp.Client, req)
if err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
+ tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, &Error{
Response: resp,
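
For reviewers unfamiliar with the refactored caching behavior above, here is a minimal, self-contained sketch of how a caller might exercise NewCachedTokenProvider with the options this patch touches; the staticProvider type, token value, and five-minute window are illustrative and not part of the vendored library.

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider is a stand-in for any TokenProvider (hypothetical, for illustration only).
type staticProvider struct{}

func (staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return &auth.Token{Value: "example-token", Expiry: time.Now().Add(time.Hour)}, nil
}

func main() {
	tp := auth.NewCachedTokenProvider(staticProvider{}, &auth.CachedTokenProviderOptions{
		ExpireEarly:         5 * time.Minute, // treat tokens as stale 5 minutes before expiry
		DisableAsyncRefresh: true,            // refresh synchronously instead of in the background
	})
	tok, err := tp.Token(context.Background())
	if err != nil {
		fmt.Println("token error:", err)
		return
	}
	fmt.Println("got token expiring at", tok.Expiry)
}
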
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
index 6f70fa353..e4a8078f8 100644
--- a/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -37,8 +37,12 @@ var (
// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
// uses the metadata service to retrieve tokens.
-func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
- return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{
+func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider {
+ return auth.NewCachedTokenProvider(&computeProvider{
+ scopes: opts.Scopes,
+ client: client,
+ tokenBindingType: opts.TokenBindingType,
+ }, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
DisableAsyncRefresh: opts.DisableAsyncRefresh,
})
@@ -46,7 +50,9 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
// computeProvider fetches tokens from the google cloud metadata service.
type computeProvider struct {
- scopes []string
+ scopes []string
+ client *metadata.Client
+ tokenBindingType TokenBindingType
}
type metadataTokenResp struct {
@@ -55,17 +61,27 @@ type metadataTokenResp struct {
TokenType string `json:"token_type"`
}
-func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
+func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
tokenURI, err := url.Parse(computeTokenURI)
if err != nil {
return nil, err
}
- if len(cs.scopes) > 0 {
+ hasScopes := len(cs.scopes) > 0
+ if hasScopes || cs.tokenBindingType != NoBinding {
v := url.Values{}
- v.Set("scopes", strings.Join(cs.scopes, ","))
+ if hasScopes {
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ }
+ switch cs.tokenBindingType {
+ case MTLSHardBinding:
+ v.Set("transport", "mtls")
+ v.Set("binding-enforcement", "on")
+ case ALTSHardBinding:
+ v.Set("transport", "alts")
+ }
tokenURI.RawQuery = v.Encode()
}
- tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
+ tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String())
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
index cce622418..d8f7d9614 100644
--- a/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"os"
"time"
@@ -27,6 +28,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/compute/metadata"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -49,6 +51,23 @@ var (
allowOnGCECheck = true
)
+// TokenBindingType specifies the type of binding used when requesting a
+// token: whether to request a hard-bound token using mTLS or an instance
+// identity bound token using ALTS.
+type TokenBindingType int
+
+const (
+ // NoBinding specifies that requested tokens are not required to have a
+ // binding. This is the default option.
+ NoBinding TokenBindingType = iota
+ // MTLSHardBinding specifies that a hard-bound token should be requested
+	// using an mTLS channel with S2A.
+ MTLSHardBinding
+ // ALTSHardBinding specifies that an instance identity bound token should
+ // be requested using an ALTS channel.
+ ALTSHardBinding
+)
+
// OnGCE reports whether this process is running in Google Cloud.
func OnGCE() bool {
// TODO(codyoss): once all libs use this auth lib move metadata check here
@@ -96,12 +115,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
}
if OnGCE() {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: opts.logger(),
+ })
return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: computeTokenProvider(opts),
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
- return metadata.ProjectID()
+ TokenProvider: computeTokenProvider(opts, metadataClient),
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return metadataClient.ProjectIDWithContext(ctx)
}),
- UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
+ UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
+ MetadataClient: metadataClient,
+ },
}), nil
}
@@ -114,6 +138,10 @@ type DetectOptions struct {
// https://www.googleapis.com/auth/cloud-platform. Required if Audience is
// not provided.
Scopes []string
+	// TokenBindingType specifies the type of binding used when requesting a
+	// token: whether to request a hard-bound token using mTLS or an instance
+	// identity bound token using ALTS. Optional.
+ TokenBindingType TokenBindingType
// Audience that credentials tokens should have. Only applicable for 2LO
// flows with service accounts. If specified, scopes should not be provided.
Audience string
@@ -142,10 +170,26 @@ type DetectOptions struct {
// CredentialsFile overrides detection logic and sources a credential file
// from the provided filepath. If provided, CredentialsJSON must not be.
// Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsFile string
// CredentialsJSON overrides detection logic and uses the JSON bytes as the
// source for the credential. If provided, CredentialsFile must not be.
// Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsJSON []byte
// UseSelfSignedJWT directs service account based credentials to create a
// self-signed JWT with the private key found in the file, skipping any
@@ -158,6 +202,11 @@ type DetectOptions struct {
// The default value is "googleapis.com". This option is ignored for
// authentication flows that do not support universe domain. Optional.
UniverseDomain string
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *DetectOptions) validate() error {
@@ -193,6 +242,10 @@ func (o *DetectOptions) client() *http.Client {
return internal.DefaultClient()
}
+func (o *DetectOptions) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
b, err := os.ReadFile(filename)
if err != nil {
@@ -253,6 +306,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
Client: opts.client(),
+ Logger: opts.logger(),
EarlyTokenExpiry: opts.EarlyTokenRefresh,
AuthHandlerOpts: handleOpts,
// TODO(codyoss): refactor this out. We need to add in auto-detection
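
A usage sketch for the new DetectOptions.Logger field added above; the scope and the slog handler setup are illustrative, and debug output only appears when the handler (or GOOGLE_SDK_GO_LOGGING_LEVEL) enables it.

package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// Debug-level slog handler so the token request/response logging added in
	// this patch becomes visible.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		Logger: logger,
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.Type)
}
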
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index b426e16d2..e5243e6cf 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
- var projectID, quotaProjectID, universeDomain string
+ var projectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
case credsfile.ServiceAccountKey:
@@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ExternalAccountKey:
f, err := credsfile.ParseExternalAccount(b)
@@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.ExternalAccountAuthorizedUserKey:
f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
@@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ImpersonatedServiceAccountKey:
f, err := credsfile.ParseImpersonatedServiceAccount(b)
@@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
}),
- JSON: b,
- ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
- QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
+ JSON: b,
+ ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
+ // TODO(codyoss): only set quota project here if there was a user override
UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
}), nil
}
@@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string
}
func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
if opts.UseSelfSignedJWT {
return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
Email: f.ClientEmail,
@@ -138,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
TokenURL: f.TokenURL,
Subject: opts.Subject,
Client: opts.client(),
+ Logger: opts.logger(),
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
@@ -156,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
EarlyTokenExpiry: opts.EarlyTokenRefresh,
RefreshToken: f.RefreshToken,
Client: opts.client(),
+ Logger: opts.logger(),
}
return auth.New3LOTokenProvider(opts3LO)
}
@@ -174,6 +179,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions
Scopes: opts.scopes(),
WorkforcePoolUserProject: f.WorkforcePoolUserProject,
Client: opts.client(),
+ Logger: opts.logger(),
IsDefaultClient: opts.Client == nil,
}
if f.ServiceAccountImpersonation != nil {
@@ -192,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU
ClientSecret: f.ClientSecret,
Scopes: opts.scopes(),
Client: opts.client(),
+ Logger: opts.logger(),
}
return externalaccountuser.NewTokenProvider(externalOpts)
}
@@ -211,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
Tp: tp,
Delegates: f.Delegates,
Client: opts.client(),
+ Logger: opts.logger(),
})
}
@@ -218,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO
return gdch.NewTokenProvider(f, &gdch.Options{
STSAudience: opts.STSAudience,
Client: opts.client(),
+ Logger: opts.logger(),
})
}
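
A sketch of the behavior change in handleServiceAccount above: with a non-default universe domain, the service-account flow is expected to take the self-signed-JWT path even when UseSelfSignedJWT is unset. The credentials file path, scope, and universe domain below are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		CredentialsFile: "/path/to/service-account.json", // placeholder
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
		UniverseDomain:  "example-universe.example.com", // any non-googleapis.com domain
	})
	if err != nil {
		log.Fatal(err)
	}
	// The resulting token should be a self-signed JWT; no token-endpoint exchange is made.
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires at", tok.Expiry)
}
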
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
index a34f6b06f..9ecd1f64b 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
var (
@@ -87,6 +89,7 @@ type awsSubjectProvider struct {
reqOpts *RequestOptions
Client *http.Client
+ logger *slog.Logger
}
func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -94,32 +97,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
if err != nil {
return "", err
}
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
@@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e
}
req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
+ sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
}
@@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]
for name, value := range headers {
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
}
@@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context
for name, value := range headers {
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return result, err
}
+ sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
}
@@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
index 112186a9e..a82206423 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
+ "log/slog"
"net/http"
"regexp"
"strconv"
@@ -28,6 +29,7 @@ import (
"cloud.google.com/go/auth/credentials/internal/impersonate"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -104,6 +106,11 @@ type Options struct {
// This is important for X509 credentials which should create a new client if the default was used
// but should respect a client explicitly passed in by the user.
IsDefaultClient bool
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
}
// SubjectTokenProvider can be used to supply a subject token to exchange for a
@@ -224,6 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
return nil, err
}
opts.resolveTokenURL()
+ logger := internallog.New(opts.Logger)
stp, err := newSubjectTokenProvider(opts)
if err != nil {
return nil, err
@@ -238,6 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
client: client,
opts: opts,
stp: stp,
+ logger: logger,
}
if opts.ServiceAccountImpersonationURL == "" {
@@ -254,6 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
Scopes: scopes,
Tp: auth.NewCachedTokenProvider(tp, nil),
TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
+ Logger: logger,
})
if err != nil {
return nil, err
@@ -269,6 +279,7 @@ type subjectTokenProvider interface {
// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
type tokenProvider struct {
client *http.Client
+ logger *slog.Logger
opts *Options
stp subjectTokenProvider
}
@@ -310,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
Authentication: clientAuth,
Headers: header,
ExtraOpts: options,
+ Logger: tp.logger,
})
if err != nil {
return nil, err
@@ -330,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
// subjectTokenProvider
func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
+ logger := internallog.New(o.Logger)
reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
if o.AwsSecurityCredentialsProvider != nil {
return &awsSubjectProvider{
securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
TargetResource: o.Audience,
reqOpts: reqOpts,
+ logger: logger,
}, nil
} else if o.SubjectTokenProvider != nil {
return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
@@ -352,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
CredVerificationURL: o.CredentialSource.URL,
TargetResource: o.Audience,
Client: o.Client,
+ logger: logger,
}
if o.CredentialSource.IMDSv2SessionTokenURL != "" {
awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
@@ -362,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
} else if o.CredentialSource.File != "" {
return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
} else if o.CredentialSource.URL != "" {
- return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
+ return &urlSubjectProvider{
+ URL: o.CredentialSource.URL,
+ Headers: o.CredentialSource.Headers,
+ Format: o.CredentialSource.Format,
+ Client: o.Client,
+ Logger: logger,
+ }, nil
} else if o.CredentialSource.Executable != nil {
ec := o.CredentialSource.Executable
if ec.Command == "" {
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
index 0a020599e..754ecf4fe 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
@@ -19,10 +19,12 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -38,6 +40,7 @@ type urlSubjectProvider struct {
Headers map[string]string
Format *credsfile.Format
Client *http.Client
+ Logger *slog.Logger
}
func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -49,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error)
for key, val := range sp.Headers {
req.Header.Add(key, val)
}
+ sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
}
+ sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return "", fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
index 0d7885479..ae39206e5 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
@@ -17,12 +17,14 @@ package externalaccountuser
import (
"context"
"errors"
+ "log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// Options stores the configuration for fetching tokens with external authorized
@@ -51,6 +53,8 @@ type Options struct {
// Client for token request.
Client *http.Client
+ // Logger for logging.
+ Logger *slog.Logger
}
func (c *Options) validate() bool {
@@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
RefreshToken: opts.RefreshToken,
Authentication: clientAuth,
Headers: headers,
+ Logger: internallog.New(tp.o.Logger),
})
if err != nil {
return nil, err
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
index 720045d3b..c2d320fdf 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -16,12 +16,13 @@ package gdch
import (
"context"
- "crypto/rsa"
+ "crypto"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -51,6 +53,7 @@ var (
type Options struct {
STSAudience string
Client *http.Client
+ Logger *slog.Logger
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
@@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
if o.STSAudience == "" {
return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
}
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, err
}
@@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
tokenURL: f.TokenURL,
aud: o.STSAudience,
- pk: pk,
+ signer: signer,
pkID: f.PrivateKeyID,
certPool: certPool,
client: o.Client,
+ logger: internallog.New(o.Logger),
}
return tp, nil
}
@@ -97,11 +101,12 @@ type gdchProvider struct {
serviceIdentity string
tokenURL string
aud string
- pk *rsa.PrivateKey
+ signer crypto.Signer
pkID string
certPool *x509.CertPool
client *http.Client
+ logger *slog.Logger
}
func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
@@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(g.pkID),
}
- payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
+ payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
if err != nil {
return nil, err
}
@@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(g.client, req)
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
+ g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, &auth.Error{
Response: resp,
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
new file mode 100644
index 000000000..705462c16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
@@ -0,0 +1,105 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
+)
+
+// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token].
+type IDTokenIAMOptions struct {
+ // Client is required.
+ Client *http.Client
+ // Logger is required.
+ Logger *slog.Logger
+ UniverseDomain auth.CredentialsPropertyProvider
+ ServiceAccountEmail string
+ GenerateIDTokenRequest
+}
+
+// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC.
+type GenerateIDTokenRequest struct {
+ Audience string `json:"audience"`
+ IncludeEmail bool `json:"includeEmail"`
+	// Delegates are the ordered, fully-qualified resource names for service
+ // accounts in a delegation chain. Each service account must be granted
+ // roles/iam.serviceAccountTokenCreator on the next service account in the
+ // chain. The delegates must have the following format:
+ // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard
+ // character is required; replacing it with a project ID is invalid.
+ // Optional.
+ Delegates []string `json:"delegates,omitempty"`
+}
+
+// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC.
+type GenerateIDTokenResponse struct {
+ Token string `json:"token"`
+}
+
+// Token calls IAM generateIdToken with the configuration provided in [IDTokenIAMOptions].
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) {
+ universeDomain, err := o.UniverseDomain.GetProperty(ctx)
+ if err != nil {
+ return nil, err
+ }
+ endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
+ url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail))
+
+ bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
+ }
+ o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+ }
+
+ var tokenResp GenerateIDTokenResponse
+ if err := json.Unmarshal(body, &tokenResp); err != nil {
+ return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+ }
+ return &auth.Token{
+ Value: tokenResp.Token,
+ // Generated ID tokens are good for one hour.
+ Expiry: time.Now().Add(1 * time.Hour),
+ }, nil
+}
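
The impersonate package added here is internal and not importable by callers, so the standalone sketch below only mirrors the request URL and JSON body that IDTokenIAMOptions.Token appears to construct, assuming FormatIAMServiceAccountResource yields the usual projects/-/serviceAccounts/{email} form; the audience, universe domain, and service-account email are placeholders.

package main

import (
	"encoding/json"
	"fmt"
)

// generateIDTokenRequest mirrors the JSON shape marshaled by the new
// GenerateIDTokenRequest type above.
type generateIDTokenRequest struct {
	Audience     string   `json:"audience"`
	IncludeEmail bool     `json:"includeEmail"`
	Delegates    []string `json:"delegates,omitempty"`
}

func main() {
	body, err := json.Marshal(generateIDTokenRequest{
		Audience:     "https://example.com", // placeholder audience
		IncludeEmail: true,
	})
	if err != nil {
		panic(err)
	}
	url := fmt.Sprintf("https://iamcredentials.%s/v1/projects/-/serviceAccounts/%s:generateIdToken",
		"googleapis.com", "sa@my-project.iam.gserviceaccount.com") // placeholder domain and account
	fmt.Println("POST", url)
	fmt.Println(string(body))
}
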
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
index ed53afa51..b3a99261f 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -20,11 +20,13 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -74,6 +76,11 @@ type Options struct {
// Client configures the underlying client used to make network requests
// when fetching tokens. Required.
Client *http.Client
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options) validate() error {
@@ -88,6 +95,7 @@ func (o *Options) validate() error {
// Token performs the exchange to get a temporary service account token to allow access to GCP.
func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
+ logger := internallog.New(o.Logger)
lifetime := defaultTokenLifetime
if o.TokenLifetimeSeconds != 0 {
lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
@@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
if err := setAuthHeader(ctx, o.Tp, req); err != nil {
return nil, err
}
+ logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
resp, body, err := internal.DoRequest(o.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
}
+ logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
index 768a9dafc..e1d2b1503 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
@@ -19,6 +19,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"strconv"
@@ -26,6 +27,7 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -40,6 +42,7 @@ const (
// Options stores the configuration for making an sts exchange request.
type Options struct {
Client *http.Client
+ Logger *slog.Logger
Endpoint string
Request *TokenRequest
Authentication ClientAuthentication
@@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
opts.Authentication.InjectAuthentication(data, opts.Headers)
encodedData := data.Encode()
+ logger := internallog.New(opts.Logger)
req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
if err != nil {
@@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo
}
req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
+ logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
resp, body, err := internal.DoRequest(opts.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
}
+ logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
index b62a8ae4d..8d335ccec 100644
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -16,8 +16,10 @@ package credentials
import (
"context"
- "crypto/rsa"
+ "crypto"
+ "errors"
"fmt"
+ "log/slog"
"strings"
"time"
@@ -35,7 +37,10 @@ var (
// configureSelfSignedJWT uses the private key in the service account to create
// a JWT without making a network call.
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
}
@@ -43,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
email: f.ClientEmail,
audience: opts.Audience,
scopes: opts.scopes(),
- pk: pk,
+ signer: signer,
pkID: f.PrivateKeyID,
+ logger: opts.logger(),
}, nil
}
@@ -52,8 +58,9 @@ type selfSignedTokenProvider struct {
email string
audience string
scopes []string
- pk *rsa.PrivateKey
+ signer crypto.Signer
pkID string
+ logger *slog.Logger
}
func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
@@ -73,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(tp.pkID),
}
- msg, err := jwt.EncodeJWS(h, c, tp.pk)
+ tok, err := jwt.EncodeJWS(h, c, tp.signer)
if err != nil {
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
}
- return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
+ tp.logger.Debug("created self-signed JWT", "token", tok)
+ return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil
}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
index efc91c2b0..d781c3e49 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -22,7 +22,7 @@ import (
"strings"
"cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
+ "cloud.google.com/go/auth/internal/compute"
"google.golang.org/grpc"
grpcgoogle "google.golang.org/grpc/credentials/google"
)
@@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool {
return true
}
-func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
+func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool {
if tp == nil {
return false
}
@@ -69,6 +69,9 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool
if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
return false
}
+ if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+ return true
+ }
if tok.MetadataString("auth.google.serviceAccount") != "default" {
return false
}
@@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool {
// configuration allows the use of direct path. If it does not the provided
// grpcOpts and endpoint are returned.
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
- if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
+ if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
grpcOpts = []grpc.DialOption{
grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 0442a5938..4610a4855 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package grpctransport provides functionality for managing gRPC client
+// connections to Google Cloud services.
package grpctransport
import (
@@ -19,16 +21,21 @@ import (
"crypto/tls"
"errors"
"fmt"
+ "log/slog"
"net/http"
+ "os"
+ "sync"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
- "go.opencensus.io/plugin/ocgrpc"
+ "github.com/googleapis/gax-go/v2/internallog"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/stats"
)
const (
@@ -38,7 +45,7 @@ const (
// Check env to decide if using google-c2p resolver for DirectPath traffic.
enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
var (
@@ -46,6 +53,27 @@ var (
timeoutDialerOption grpc.DialOption
)
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: When this module depends on a version of otelgrpc containing the fix,
+// replace this singleton with inline usage for simplicity.
+// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
// ClientCertProvider is a function that returns a TLS client certificate to be
// used when opening TLS connections. It follows the same semantics as
// [crypto/tls.Config.GetClientCertificate].
@@ -90,6 +118,11 @@ type Options struct {
// APIKey specifies an API key to be used as the basis for authentication.
// If set DetectOpts are ignored.
APIKey string
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
@@ -105,6 +138,10 @@ func (o *Options) client() *http.Client {
return nil
}
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
func (o *Options) validate() error {
if o == nil {
return errors.New("grpctransport: opts required to be non-nil")
@@ -146,6 +183,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
do.TokenURL = credentials.GoogleMTLSTokenURL
}
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
return do
}
@@ -164,6 +204,10 @@ type InternalOptions struct {
EnableDirectPathXds bool
// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
EnableJWTWithScope bool
+ // AllowHardBoundTokens allows libraries to request a hard-bound token.
+ // Obtaining hard-bound tokens requires the connection to be established
+ // using either ALTS or mTLS with S2A.
+ AllowHardBoundTokens []string
// DefaultAudience specifies a default audience to be used as the audience
// field ("aud") for the JWT token authentication.
DefaultAudience string
@@ -214,6 +258,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
@@ -221,13 +266,13 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
tOpts.EnableDirectPath = io.EnableDirectPath
tOpts.EnableDirectPathXds = io.EnableDirectPathXds
}
- transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
+ transportCreds, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
if err != nil {
return nil, err
}
if !secure {
- transportCreds = grpcinsecure.NewCredentials()
+ transportCreds.TransportCredentials = grpcinsecure.NewCredentials()
}
// Initialize gRPC dial options with transport-level security options.
@@ -256,6 +301,18 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
if opts.Credentials != nil {
creds = opts.Credentials
} else {
+ // This condition is only met for non-DirectPath clients because
+ // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath
+ // is false.
+ if transportCreds.TransportType == transport.TransportTypeMTLSS2A {
+			// Check whether the client allows requesting a hard-bound token for the mTLS-with-S2A transport type.
+ for _, ev := range opts.InternalOptions.AllowHardBoundTokens {
+ if ev == "MTLS_S2A" {
+ opts.DetectOpts.TokenBindingType = credentials.MTLSHardBinding
+ break
+ }
+ }
+ }
var err error
creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
if err != nil {
@@ -271,7 +328,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
if metadata == nil {
metadata = make(map[string]string, 1)
}
- metadata[quotaProjectHeaderKey] = qp
+		// Don't overwrite a user-specified quota project.
+ if _, ok := metadata[quotaProjectHeaderKey]; !ok {
+ metadata[quotaProjectHeaderKey] = qp
+ }
}
grpcOpts = append(grpcOpts,
grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
@@ -280,18 +340,17 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
clientUniverseDomain: opts.UniverseDomain,
}),
)
-
// Attempt Direct Path
- grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds)
+ grpcOpts, transportCreds.Endpoint = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds)
}
// Add tracing, but before the other options, so that clients can override the
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
- grpcOpts = addOCStatsHandler(grpcOpts, opts)
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
- return grpc.DialContext(ctx, endpoint, grpcOpts...)
+ return grpc.Dial(transportCreds.Endpoint, grpcOpts...)
}
// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
@@ -325,15 +384,23 @@ type grpcCredentialsProvider struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com". This is the universe domain
-// configured for the client, which will be compared to the universe domain
-// that is separately configured for the credentials.
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
- if c.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if c.clientUniverseDomain != "" {
+ return c.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return c.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
@@ -378,9 +445,9 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
return c.secure
}
-func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
if opts.DisableTelemetry {
return dialOpts
}
- return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+ return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
}
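
A small standalone sketch of the universe-domain precedence documented for getClientUniverseDomain above; the function name and string literals below are illustrative rather than the package's internal identifiers.

package main

import (
	"fmt"
	"os"
)

// resolveClientUniverseDomain illustrates the lookup order: explicit client
// option, then the GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then
// the "googleapis.com" default.
func resolveClientUniverseDomain(clientUniverseDomain string) string {
	if clientUniverseDomain != "" {
		return clientUniverseDomain
	}
	if env := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); env != "" {
		return env
	}
	return "googleapis.com"
}

func main() {
	fmt.Println(resolveClientUniverseDomain(""))
}
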
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 969c8d4d2..5758e85b5 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -12,18 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package httptransport provides functionality for managing HTTP client
+// connections to Google Cloud services.
package httptransport
import (
"crypto/tls"
"errors"
"fmt"
+ "log/slog"
"net/http"
"cloud.google.com/go/auth"
detect "cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// ClientCertProvider is a function that returns a TLS client certificate to be
@@ -67,6 +71,11 @@ type Options struct {
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
UniverseDomain string
+	// Logger is used for debug logging. If provided, logging will be enabled
+	// at the logger's configured level. By default, logging is disabled unless
+	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+	// logger will be used. Optional.
+ Logger *slog.Logger
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
@@ -99,6 +108,10 @@ func (o *Options) client() *http.Client {
return nil
}
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
func (o *Options) resolveDetectOptions() *detect.DetectOptions {
io := o.InternalOptions
// soft-clone these so we are not updating a ref the user holds and may reuse
@@ -123,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions {
do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
do.TokenURL = detect.GoogleMTLSTokenURL
}
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
return do
}
@@ -145,14 +161,21 @@ type InternalOptions struct {
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
- // internally for clients that needs more control over their transport.
+ // internally for clients that need more control over their transport.
SkipValidation bool
+ // SkipUniverseDomainValidation skips the verification that the universe
+ // domain configured for the client matches the universe domain configured
+ // for the credentials. It should only be used internally for clients that
+ // need more control over their transport. The default is false.
+ SkipUniverseDomainValidation bool
}
// AddAuthorizationMiddleware adds a middleware to the provided client's
// transport that sets the Authorization header with the value produced by the
// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
// if client or creds is nil.
+//
+// This function does not support setting a universe domain value on the client.
func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
if client == nil || creds == nil {
return fmt.Errorf("httptransport: client and tp must not be nil")
@@ -171,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er
client.Transport = &authTransport{
creds: creds,
base: base,
- // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
}
return nil
}
@@ -189,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) {
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
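
A usage sketch for AddAuthorizationMiddleware as documented above; the detect options and the request URL are illustrative only.

package main

import (
	"log"
	"net/http"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{}
	// Wrap the client's transport so each request carries an Authorization header.
	if err := httptransport.AddAuthorizationMiddleware(client, creds); err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://cloudresourcemanager.googleapis.com/v1/projects") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
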
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
deleted file mode 100644
index 467c477c0..000000000
--- a/vendor/cloud.google.com/go/auth/httptransport/trace.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- cloudTraceHeader = `X-Cloud-Trace-Context`
-)
-
-// asserts the httpFormat fulfills this foreign interface
-var _ propagation.HTTPFormat = (*httpFormat)(nil)
-
-// httpFormat implements propagation.httpFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
-type httpFormat struct{}
-
-// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
-func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(cloudTraceHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 32)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Cloud Trace header.
-func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(cloudTraceHeader, header)
-}
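
For reference, the removed OpenCensus propagator handled the "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=OPTIONS" header format documented above. A minimal standalone sketch of that parsing, for readers who relied on the old behavior; the function name and sample value are illustrative only:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCloudTrace splits an "X-Cloud-Trace-Context" value of the form
// TRACE_ID/SPAN_ID;o=OPTIONS into its parts.
func parseCloudTrace(h string) (traceID string, spanID uint64, sampled bool, err error) {
	slash := strings.Index(h, "/")
	if slash == -1 {
		return "", 0, false, fmt.Errorf("missing span id in %q", h)
	}
	traceID, rest := h[:slash], h[slash+1:]

	spanStr, opts := rest, ""
	if i := strings.Index(rest, ";"); i != -1 {
		spanStr, opts = rest[:i], rest[i+1:]
	}
	if spanID, err = strconv.ParseUint(spanStr, 10, 64); err != nil {
		return "", 0, false, err
	}
	sampled = strings.HasPrefix(opts, "o=1") // simplified: the real options field is a bit mask
	return traceID, spanID, sampled, nil
}

func main() {
	fmt.Println(parseCloudTrace("105445aa7843bc8bf206b12000100000/1;o=1"))
}
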
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index 07eea4744..ee215b6dc 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -19,6 +19,7 @@ import (
"crypto/tls"
"net"
"net/http"
+ "os"
"time"
"cloud.google.com/go/auth"
@@ -26,12 +27,12 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
- "go.opencensus.io/plugin/ochttp"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
const (
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
@@ -41,7 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers: headers,
}
var trans http.RoundTripper = ht
- trans = addOCTransport(trans, opts)
+ trans = addOpenTelemetryTransport(trans, opts)
switch {
case opts.DisableAuthentication:
// Do nothing.
@@ -76,13 +77,21 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
if headers == nil {
headers = make(map[string][]string, 1)
}
- headers.Set(quotaProjectHeaderKey, qp)
+ // Don't overwrite user specified quota
+ if v := headers.Get(quotaProjectHeaderKey); v == "" {
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ }
+ var skipUD bool
+ if iOpts := opts.InternalOptions; iOpts != nil {
+ skipUD = iOpts.SkipUniverseDomainValidation
}
creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
trans = &authTransport{
- base: trans,
- creds: creds,
- clientUniverseDomain: opts.UniverseDomain,
+ base: trans,
+ creds: creds,
+ clientUniverseDomain: opts.UniverseDomain,
+ skipUniverseDomainValidation: skipUD,
}
}
return trans, nil
@@ -94,7 +103,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
// http.DefaultTransport.
// If TLSCertificate is available, set TLSClientConfig as well.
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
- trans := http.DefaultTransport.(*http.Transport).Clone()
+ defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+ if !ok {
+ defaultTransport = transport.BaseTransport()
+ }
+ trans := defaultTransport.Clone()
trans.MaxIdleConnsPerHost = 100
if clientCertSource != nil {
@@ -155,29 +168,37 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return rt.RoundTrip(&newReq)
}
-func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
}
- return &ochttp.Transport{
- Base: trans,
- Propagation: &httpFormat{},
- }
+ return otelhttp.NewTransport(trans)
}
type authTransport struct {
- creds *auth.Credentials
- base http.RoundTripper
- clientUniverseDomain string
+ creds *auth.Credentials
+ base http.RoundTripper
+ clientUniverseDomain string
+ skipUniverseDomainValidation bool
}
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return t.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
// RoundTrip authorizes and authenticates the request with an
@@ -197,7 +218,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if err != nil {
return nil, err
}
- if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
if err != nil {
return nil, err
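
The new getClientUniverseDomain precedence (explicit client option, then the GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then the googleapis.com default) can be shown with a small standalone sketch; the function name below is made up for the example and is not the library's API:

package main

import (
	"fmt"
	"os"
)

// resolveUniverseDomain mirrors the precedence the updated transport applies.
func resolveUniverseDomain(optionValue string) string {
	if optionValue != "" {
		return optionValue // explicit option always wins
	}
	if env := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); env != "" {
		return env // environment variable is the fallback
	}
	return "googleapis.com" // library default
}

func main() {
	os.Setenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", "example.universe.goog")
	fmt.Println(resolveUniverseDomain(""))            // example.universe.goog
	fmt.Println(resolveUniverseDomain("custom.goog")) // custom.goog
}
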
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
new file mode 100644
index 000000000..05c7e8bdd
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+)
+
+// OnComputeEngine returns whether the client is running on GCE.
+//
+// This is a copy of the gRPC internal googlecloud.OnGCE() func at:
+// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
+// The functionality is similar to the metadata.OnGCE() func at:
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go
+// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server.
+// In particular, OnComputeEngine() will return false on Serverless.
+func OnComputeEngine() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+ name := string(manufacturer)
+ switch goos {
+ case "linux":
+ name = strings.TrimSpace(name)
+ return name == "Google" || name == "Google Compute Engine"
+ case "windows":
+ name = strings.Replace(name, " ", "", -1)
+ name = strings.Replace(name, "\n", "", -1)
+ name = strings.Replace(name, "\r", "", -1)
+ return name == "Google"
+ default:
+ return false
+ }
+}
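
A minimal sketch of the same DMI-based detection on Linux, assuming only that /sys/class/dmi/id/product_name is readable; it mirrors the vendored helper rather than calling it, since the package is internal:

package main

import (
	"fmt"
	"os"
	"strings"
)

// onGCELinux reads the product name the kernel exposes and compares it against
// the strings Google VMs report. No metadata-server request is involved, which
// is the point of this check.
func onGCELinux() bool {
	b, err := os.ReadFile("/sys/class/dmi/id/product_name")
	if err != nil {
		return false
	}
	name := strings.TrimSpace(string(b))
	return name == "Google" || name == "Google Compute Engine"
}

func main() {
	fmt.Println("running on GCE:", onGCELinux())
}
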
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
similarity index 68%
rename from vendor/github.com/go-openapi/swag/post_go18.go
rename to vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
index f5228b82c..af490bf4f 100644
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
@@ -1,10 +1,13 @@
-// Copyright 2015 go-swagger maintainers
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,13 +15,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
+package compute
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
+func manufacturer() ([]byte, error) {
+ return nil, nil
}
diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
similarity index 67%
rename from vendor/github.com/go-openapi/swag/pre_go18.go
rename to vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
index 2757d9b95..d92178df8 100644
--- a/vendor/github.com/go-openapi/swag/pre_go18.go
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
@@ -1,10 +1,10 @@
-// Copyright 2015 go-swagger maintainers
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !go1.8
-// +build !go1.8
+package compute
-package swag
+import "os"
-import "net/url"
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
+func manufacturer() ([]byte, error) {
+ return os.ReadFile(linuxProductNameFile)
}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 000000000..16be9df30
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "errors"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ windowsCheckCommand = "powershell.exe"
+ windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+ powershellOutputFilter = "Manufacturer"
+ windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+ cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+ out, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+ if strings.HasPrefix(line, powershellOutputFilter) {
+ re := regexp.MustCompile(windowsManufacturerRegex)
+ name := re.FindString(line)
+ name = strings.TrimLeft(name, ":")
+ return []byte(name), nil
+ }
+ }
+ return nil, errors.New("cannot determine the machine's manufacturer")
+}
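
The Windows path shells out to PowerShell and then keeps only the value after the colon on the Manufacturer line. A small sketch of just that post-processing, with made-up sample output standing in for Get-WmiObject:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// manufacturerFromWMI extracts the manufacturer value from WMI-style output.
func manufacturerFromWMI(out string) string {
	re := regexp.MustCompile(":(.*)")
	for _, line := range strings.Split(strings.TrimSuffix(out, "\n"), "\n") {
		if strings.HasPrefix(line, "Manufacturer") {
			return strings.TrimSpace(strings.TrimLeft(re.FindString(line), ":"))
		}
	}
	return ""
}

func main() {
	sample := "SMBIOSBIOSVersion : Google\nManufacturer      : Google\nName              : Google"
	fmt.Println(manufacturerFromWMI(sample)) // Google
}
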
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 4308345ed..6a8eab6eb 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -16,7 +16,7 @@ package internal
import (
"context"
- "crypto/rsa"
+ "crypto"
"crypto/x509"
"encoding/json"
"encoding/pem"
@@ -38,8 +38,11 @@ const (
// QuotaProjectEnvVar is the environment variable for setting the quota
// project.
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
@@ -69,25 +72,27 @@ func DefaultClient() *http.Client {
}
// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
+// to a crypto.Signer. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+func ParseKey(key []byte) (crypto.Signer, error) {
block, _ := pem.Decode(key)
if block != nil {
key = block.Bytes
}
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ var parsedKey crypto.PrivateKey
+ var err error
+ parsedKey, err = x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
}
}
- parsed, ok := parsedKey.(*rsa.PrivateKey)
+ parsed, ok := parsedKey.(crypto.Signer)
if !ok {
- return nil, errors.New("private key is invalid")
+ return nil, errors.New("private key is not a signer")
}
return parsed, nil
}
@@ -176,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) {
// ComputeUniverseDomainProvider fetches the credentials universe domain from
// the google cloud metadata service.
type ComputeUniverseDomainProvider struct {
+ MetadataClient *metadata.Client
universeDomainOnce sync.Once
universeDomain string
universeDomainErr error
@@ -185,7 +191,7 @@ type ComputeUniverseDomainProvider struct {
// metadata service.
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
c.universeDomainOnce.Do(func() {
- c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
+ c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
})
if c.universeDomainErr != nil {
return "", c.universeDomainErr
@@ -194,14 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string
}
// httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
+var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
- return metadata.GetWithContext(ctx, "universe/universe_domain")
+ return client.GetWithContext(ctx, "universe/universe-domain")
}
-func getMetadataUniverseDomain(ctx context.Context) (string, error) {
- universeDomain, err := httpGetMetadataUniverseDomain(ctx)
+func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
+ universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
if err == nil {
return universeDomain, nil
}
@@ -211,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) {
}
return "", err
}
+
+// FormatIAMServiceAccountResource sets a service account name in an IAM resource
+// name.
+func FormatIAMServiceAccountResource(name string) string {
+ return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
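
The switch from *rsa.PrivateKey to crypto.Signer means non-RSA keys, for example an ECDSA key in PKCS#8 form, now pass through the same parse path. A self-contained sketch using only the standard library; it mirrors the logic rather than calling the internal ParseKey:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// Generate an ECDSA key and wrap it in a PKCS#8 PEM block.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		panic(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

	// Parse it back and assert the generic signer interface, as the new code does.
	block, _ := pem.Decode(pemBytes)
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}
	signer, ok := parsed.(crypto.Signer)
	if !ok {
		panic("private key is not a signer")
	}
	fmt.Printf("is crypto.Signer: %v, public key type: %T\n", ok, signer.Public())
}
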
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
index dc28b3c3b..9bd55f510 100644
--- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) {
}
// EncodeJWS encodes the data using the provided key as a JSON web signature.
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
+func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
@@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
ss := fmt.Sprintf("%s.%s", head, claims)
h := sha256.New()
h.Write([]byte(ss))
- sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+ sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
if err != nil {
return "", err
}
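
For RSA keys the new signer.Sign call is equivalent to the old rsa.SignPKCS1v15 call, because passing a plain crypto.SHA256 as the SignerOpts selects PKCS #1 v1.5. A short sketch demonstrating that with a throwaway key; the signing input is illustrative, not a real JWS:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signingInput := "header.claims" // real input is base64url-encoded JSON segments
	digest := sha256.Sum256([]byte(signingInput))

	// Sign through the generic crypto.Signer interface, as EncodeJWS now does.
	sig, err := key.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	// Verify with the PKCS #1 v1.5 verifier to show it is the same scheme.
	err = rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, digest[:], sig)
	fmt.Println("signature verifies:", err == nil)
}
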
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index d94e0af08..b1f0fcf93 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -17,7 +17,10 @@ package transport
import (
"context"
"crypto/tls"
+ "crypto/x509"
"errors"
+ "log"
+ "log/slog"
"net"
"net/http"
"net/url"
@@ -44,11 +47,19 @@ const (
googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+
+ mtlsMDSRoot = "/run/google-mds-mtls/root.crt"
+ mtlsMDSKey = "/run/google-mds-mtls/client.key"
)
-var (
- mdsMTLSAutoConfigSource mtlsConfigSource
- errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
+// Type represents the type of transport used.
+type Type int
+
+const (
+ // TransportTypeUnknown represents an unknown transport type and is the default option.
+ TransportTypeUnknown Type = iota
+ // TransportTypeMTLSS2A represents the mTLS transport type using S2A.
+ TransportTypeMTLSS2A
)
// Options is a struct that is duplicated information from the individual
@@ -56,13 +67,14 @@ var (
// fields on httptransport.Options and grpctransport.Options.
type Options struct {
Endpoint string
- DefaultMTLSEndpoint string
DefaultEndpointTemplate string
+ DefaultMTLSEndpoint string
ClientCertProvider cert.Provider
Client *http.Client
UniverseDomain string
EnableDirectPath bool
EnableDirectPathXds bool
+ Logger *slog.Logger
}
// getUniverseDomain returns the default service domain for a given Cloud
@@ -90,6 +102,16 @@ func (o *Options) defaultEndpoint() string {
return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
}
+// defaultMTLSEndpoint returns the DefaultMTLSEndpoint template merged with
+// the universe domain if DefaultMTLSEndpoint is set, otherwise it returns an
+// empty string.
+func (o *Options) defaultMTLSEndpoint() string {
+ if o.DefaultMTLSEndpoint == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
// default endpoint.
func (o *Options) mergedEndpoint() (string, error) {
@@ -108,20 +130,44 @@ func fixScheme(baseURL string) string {
return baseURL
}
+// GRPCTransportCredentials embeds interface TransportCredentials with additional data.
+type GRPCTransportCredentials struct {
+ credentials.TransportCredentials
+ Endpoint string
+ TransportType Type
+}
+
// GetGRPCTransportCredsAndEndpoint returns an instance of
// [google.golang.org/grpc/credentials.TransportCredentials], and the
-// corresponding endpoint to use for GRPC client.
-func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
+// corresponding endpoint and transport type to use for GRPC client.
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) {
config, err := getTransportConfig(opts)
if err != nil {
- return nil, "", err
+ return nil, err
}
defaultTransportCreds := credentials.NewTLS(&tls.Config{
GetClientCertificate: config.clientCertSource,
})
- if config.s2aAddress == "" {
- return defaultTransportCreds, config.endpoint, nil
+
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
var fallbackOpts *s2a.FallbackOptions
@@ -133,14 +179,15 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede
}
s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
})
if err != nil {
// Use default if we cannot initialize S2A client transport credentials.
- return defaultTransportCreds, config.endpoint, nil
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
- return s2aTransportCreds, config.s2aMTLSEndpoint, nil
+ return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil
}
// GetHTTPTransportConfig returns a client certificate source and a function for
@@ -151,7 +198,23 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
return nil, nil, err
}
- if config.s2aAddress == "" {
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
return config.clientCertSource, nil, nil
}
@@ -169,12 +232,38 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
}
dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
})
return nil, dialTLSContextFunc, nil
}
+func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) {
+ rootPEM, err := os.ReadFile(mtlsMDSRootFile)
+ if err != nil {
+ return nil, err
+ }
+ caCertPool := x509.NewCertPool()
+ ok := caCertPool.AppendCertsFromPEM(rootPEM)
+ if !ok {
+ return nil, errors.New("failed to load MTLS MDS root certificate")
+ }
+ // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain
+ // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the
+ // tls.X509KeyPair function as both the certificate chain and private key arguments.
+ cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := tls.Config{
+ RootCAs: caCertPool,
+ Certificates: []tls.Certificate{cert},
+ MinVersion: tls.VersionTLS13,
+ }
+ return credentials.NewTLS(&tlsConfig), nil
+}
+
func getTransportConfig(opts *Options) (*transportConfig, error) {
clientCertSource, err := GetClientCertificateProvider(opts)
if err != nil {
@@ -192,21 +281,18 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
if !shouldUseS2A(clientCertSource, opts) {
return &defaultTransportConfig, nil
}
- if !opts.isUniverseDomainGDU() {
- return nil, errUniverseNotSupportedMTLS
- }
-
- s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
- s2aAddress := GetS2AAddress()
- if s2aAddress == "" {
+ s2aAddress := GetS2AAddress(opts.Logger)
+ mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
+ if s2aAddress == "" && mtlsS2AAddress == "" {
return &defaultTransportConfig, nil
}
return &transportConfig{
clientCertSource: clientCertSource,
endpoint: endpoint,
s2aAddress: s2aAddress,
- s2aMTLSEndpoint: s2aMTLSEndpoint,
+ mtlsS2AAddress: mtlsS2AAddress,
+ s2aMTLSEndpoint: opts.defaultMTLSEndpoint(),
}, nil
}
@@ -241,8 +327,10 @@ type transportConfig struct {
clientCertSource cert.Provider
// The corresponding endpoint to use based on client certificate source.
endpoint string
- // The S2A address if it can be used, otherwise an empty string.
+ // The plaintext S2A address if it can be used, otherwise an empty string.
s2aAddress string
+ // The MTLS S2A address if it can be used, otherwise an empty string.
+ mtlsS2AAddress string
// The MTLS endpoint to use with S2A.
s2aMTLSEndpoint string
}
@@ -250,24 +338,23 @@ type transportConfig struct {
// getEndpoint returns the endpoint for the service, taking into account the
// user-provided endpoint override "settings.Endpoint".
//
-// If no endpoint override is specified, we will either return the default endpoint or
-// the default mTLS endpoint if a client certificate is available.
+// If no endpoint override is specified, we will either return the default
+// endpoint or the default mTLS endpoint if a client certificate is available.
//
-// You can override the default endpoint choice (mtls vs. regular) by setting the
-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
+// You can override the default endpoint choice (mTLS vs. regular) by setting
+// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
//
// If the endpoint override is an address (host:port) rather than full base
// URL (ex. https://...), then the user-provided address will be merged into
// the default endpoint. For example, WithEndpoint("myhost:8000") and
-// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz"
+// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return
+// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS
+// endpoint.
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
if opts.Endpoint == "" {
mtlsMode := getMTLSMode()
if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
- if !opts.isUniverseDomainGDU() {
- return "", errUniverseNotSupportedMTLS
- }
- return opts.DefaultMTLSEndpoint, nil
+ return opts.defaultMTLSEndpoint(), nil
}
return opts.defaultEndpoint(), nil
}
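
The selection order introduced in both the gRPC and HTTP paths can be summarized as: prefer the MTLS S2A address when the MDS client credentials load, fall back to the plaintext S2A address, otherwise keep the default TLS path. A compact sketch of that decision; function and parameter names are illustrative, and mtlsCredsOK stands in for loadMTLSMDSTransportCreds succeeding:

package main

import "fmt"

// chooseS2A returns the S2A address to dial, or useS2A=false for the default path.
func chooseS2A(mtlsAddr, plaintextAddr string, mtlsCredsOK bool) (addr string, useS2A bool) {
	switch {
	case mtlsAddr != "" && mtlsCredsOK:
		return mtlsAddr, true // mTLS to the S2A service
	case plaintextAddr != "":
		return plaintextAddr, true // plaintext fallback
	default:
		return "", false // default transport credentials
	}
}

func main() {
	fmt.Println(chooseS2A("mds-mtls:8080", "plaintext:8080", false)) // plaintext:8080 true
	fmt.Println(chooseS2A("", "", false))                            // "" false
}
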
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
index 366515916..6c954ae19 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
@@ -16,7 +16,6 @@ package cert
import (
"crypto/tls"
- "errors"
"github.com/googleapis/enterprise-certificate-proxy/client"
)
@@ -37,10 +36,9 @@ type ecpSource struct {
func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
key, err := client.Cred(configFilePath)
if err != nil {
- if errors.Is(err, client.ErrCredUnavailable) {
- return nil, errSourceUnavailable
- }
- return nil, err
+ // TODO(codyoss): once this is fixed upstream we can handle this error a
+ // little better here. But be safe for now and assume unavailable.
+ return nil, errSourceUnavailable
}
return (&ecpSource{
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
index 3227aba28..738cb2161 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) {
file, err := os.ReadFile(configFilePath)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- // Config file missing means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- return nil, err
+ // Config file missing means Secure Connect is not supported.
+ // There are non-os.ErrNotExist errors that may be returned.
+ // (e.g. if the home directory is /dev/null, *nix systems will
+ // return ENOTDIR instead of ENOENT)
+ return nil, errSourceUnavailable
}
var metadata secureConnectMetadata
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
index e8675bf82..347aaced7 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo)
func getCertAndKeyFiles(configFilePath string) (string, string, error) {
jsonFile, err := os.Open(configFilePath)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- return "", "", errSourceUnavailable
- }
- return "", "", err
+ return "", "", errSourceUnavailable
}
byteValue, err := io.ReadAll(jsonFile)
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
index 2ed532deb..a63309956 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -15,12 +15,14 @@
package transport
import (
+ "context"
"encoding/json"
+ "fmt"
"log"
+ "log/slog"
"os"
"strconv"
"sync"
- "time"
"cloud.google.com/go/auth/internal/transport/cert"
"cloud.google.com/go/compute/metadata"
@@ -31,41 +33,38 @@ const (
)
var (
- // The period an MTLS config can be reused before needing refresh.
- configExpiry = time.Hour
+ mtlsConfiguration *mtlsConfig
- // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
mtlsOnce sync.Once
)
// GetS2AAddress returns the S2A address to be reached via plaintext connection.
// Returns empty string if not set or invalid.
-func GetS2AAddress() string {
- c, err := getMetadataMTLSAutoConfig().Config()
- if err != nil {
- return ""
- }
- if !c.Valid() {
+func GetS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
return ""
}
- return c.S2A.PlaintextAddress
+ return mtlsConfiguration.S2A.PlaintextAddress
}
-type mtlsConfigSource interface {
- Config() (*mtlsConfig, error)
+// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
+// Returns empty string if not set or invalid.
+func GetMTLSS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.MTLSAddress
}
// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
type mtlsConfig struct {
- S2A *s2aAddresses `json:"s2a"`
- Expiry time.Time
+ S2A *s2aAddresses `json:"s2a"`
}
-func (c *mtlsConfig) Valid() bool {
- return c != nil && c.S2A != nil && !c.expired()
-}
-func (c *mtlsConfig) expired() bool {
- return c.Expiry.Before(time.Now())
+func (c *mtlsConfig) valid() bool {
+ return c != nil && c.S2A != nil
}
// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
@@ -76,80 +75,39 @@ type s2aAddresses struct {
MTLSAddress string `json:"mtls_address"`
}
-// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh.
-func getMetadataMTLSAutoConfig() mtlsConfigSource {
+func getMetadataMTLSAutoConfig(logger *slog.Logger) {
+ var err error
mtlsOnce.Do(func() {
- mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{
- src: &metadataMTLSAutoConfig{},
+ mtlsConfiguration, err = queryConfig(logger)
+ if err != nil {
+ log.Printf("Getting MTLS config failed: %v", err)
}
})
- return mdsMTLSAutoConfigSource
-}
-
-// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry.
-// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig.
-type reuseMTLSConfigSource struct {
- src mtlsConfigSource // src.Config() is called when config is expired
- mu sync.Mutex // mutex guards config
- config *mtlsConfig // cached config
-}
-
-func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
-
- if cs.config.Valid() {
- return cs.config, nil
- }
- c, err := cs.src.Config()
- if err != nil {
- return nil, err
- }
- cs.config = c
- return c, nil
}
-// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource
-// It has the logic to query MDS and return an mtlsConfig
-type metadataMTLSAutoConfig struct{}
-
-var httpGetMetadataMTLSConfig = func() (string, error) {
- return metadata.Get(configEndpointSuffix)
+var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: logger,
+ })
+ return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
}
-func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) {
- resp, err := httpGetMetadataMTLSConfig()
+func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
+ resp, err := httpGetMetadataMTLSConfig(logger)
if err != nil {
- log.Printf("querying MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
}
var config mtlsConfig
err = json.Unmarshal([]byte(resp), &config)
if err != nil {
- log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err)
}
-
if config.S2A == nil {
- log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config)
}
-
- // set new expiry
- config.Expiry = time.Now().Add(configExpiry)
return &config, nil
}
-func defaultMTLSConfig() *mtlsConfig {
- return &mtlsConfig{
- S2A: &s2aAddresses{
- PlaintextAddress: "",
- MTLSAddress: "",
- },
- Expiry: time.Now().Add(configExpiry),
- }
-}
-
func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
// If client cert is found, use that over S2A.
if clientCertSource != nil {
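
The auto-mTLS configuration is now fetched from the metadata server once per process and cached without expiry; its JSON shape is a single "s2a" object carrying the plaintext and MTLS addresses. A sketch of decoding that shape, with an illustrative payload rather than a real MDS response:

package main

import (
	"encoding/json"
	"fmt"
)

type s2aAddrs struct {
	PlaintextAddress string `json:"plaintext_address"`
	MTLSAddress      string `json:"mtls_address"`
}

type mtlsCfg struct {
	S2A *s2aAddrs `json:"s2a"`
}

func main() {
	sample := `{"s2a": {"plaintext_address": "localhost:8080", "mtls_address": "localhost:8443"}}`
	var cfg mtlsCfg
	if err := json.Unmarshal([]byte(sample), &cfg); err != nil {
		panic(err)
	}
	// A nil S2A here is what the valid() check above guards against.
	fmt.Println(cfg.S2A.PlaintextAddress, cfg.S2A.MTLSAddress)
}
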
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
index 718a6b171..5c8721efa 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -37,6 +37,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
}
newDo := &credentials.DetectOptions{
// Simple types
+ TokenBindingType: oldDo.TokenBindingType,
Audience: oldDo.Audience,
Subject: oldDo.Subject,
EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
@@ -46,9 +47,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
UniverseDomain: oldDo.UniverseDomain,
- // These fields are are pointer types that we just want to use exactly
- // as the user set, copy the ref
+ // These fields are pointer types that we just want to use exactly as
+ // the user set, copy the ref
Client: oldDo.Client,
+ Logger: oldDo.Logger,
AuthHandlerOptions: oldDo.AuthHandlerOptions,
}
@@ -81,12 +83,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri
// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
- trans := baseTransport()
+ trans := BaseTransport()
trans.TLSClientConfig = tlsConfig
return &http.Client{Transport: trans}
}
-func baseTransport() *http.Transport {
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
+func BaseTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
index f75c8e204..42716752e 100644
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
@@ -1,5 +1,40 @@
# Changelog
+## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161)
+
+## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52))
+
+## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10)
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
index 9835ac571..9cc33e5ee 100644
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
@@ -26,6 +26,13 @@ import (
"golang.org/x/oauth2/google"
)
+const (
+ oauth2TokenSourceKey = "oauth2.google.tokenSource"
+ oauth2ServiceAccountKey = "oauth2.google.serviceAccount"
+ authTokenSourceKey = "auth.google.tokenSource"
+ authServiceAccountKey = "auth.google.serviceAccount"
+)
+
// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
// into a [cloud.google.com/go/auth.TokenProvider].
func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
@@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
}
return nil, err
}
+ // Preserve compute token metadata, for both types of tokens.
+ metadata := map[string]interface{}{}
+ if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok {
+ metadata[authTokenSourceKey] = val
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok {
+ metadata[authServiceAccountKey] = val
+ metadata[oauth2ServiceAccountKey] = val
+ }
return &auth.Token{
- Value: tok.AccessToken,
- Type: tok.Type(),
- Expiry: tok.Expiry,
+ Value: tok.AccessToken,
+ Type: tok.Type(),
+ Expiry: tok.Expiry,
+ Metadata: metadata,
}, nil
}
@@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
}
return nil, err
}
- return &oauth2.Token{
+ tok2 := &oauth2.Token{
AccessToken: tok.Value,
TokenType: tok.Type,
Expiry: tok.Expiry,
- }, nil
+ }
+ // Preserve token metadata.
+ m := tok.Metadata
+ if m != nil {
+ // Copy map to avoid concurrent map writes error (#11161).
+ metadata := make(map[string]interface{}, len(m)+2)
+ for k, v := range m {
+ metadata[k] = v
+ }
+ // Append compute token metadata in converted form.
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" {
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" {
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ tok2 = tok2.WithExtra(metadata)
+ }
+ return tok2, nil
}
// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
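
The adapter now copies token metadata into a fresh map before calling WithExtra, which is the fix for the concurrent-map-write issue referenced above, and it mirrors the compute token-source key under both naming schemes. A small standalone sketch; the metadata and token values are illustrative:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	src := map[string]interface{}{"auth.google.tokenSource": "compute-metadata"}

	// Copy before sharing so concurrent callers never mutate the same map.
	metadata := make(map[string]interface{}, len(src)+2)
	for k, v := range src {
		metadata[k] = v
	}
	if v, ok := metadata["auth.google.tokenSource"].(string); ok && v != "" {
		metadata["oauth2.google.tokenSource"] = v
	}

	tok := (&oauth2.Token{AccessToken: "example-access-token"}).WithExtra(metadata)
	fmt.Println(tok.Extra("oauth2.google.tokenSource")) // compute-metadata
}
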
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
index 97a57f469..07804dc16 100644
--- a/vendor/cloud.google.com/go/auth/threelegged.go
+++ b/vendor/cloud.google.com/go/auth/threelegged.go
@@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"mime"
"net/http"
"net/url"
@@ -28,6 +29,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
@@ -69,6 +71,11 @@ type Options3LO struct {
// AuthHandlerOpts provides a set of options for doing a
// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
AuthHandlerOpts *AuthorizationHandlerOptions
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the logger's configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options3LO) validate() error {
@@ -96,6 +103,10 @@ func (o *Options3LO) validate() error {
return nil
}
+func (o *Options3LO) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
// PKCEOptions holds parameters to support PKCE.
type PKCEOptions struct {
// Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin
if o.AuthStyle == StyleInHeader {
req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
}
+ logger := o.logger()
+ logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
// Make request
resp, body, err := internal.DoRequest(o.client(), req)
if err != nil {
return nil, refreshToken, err
}
+ logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
tokError := &Error{
Response: resp,
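
The new Logger field accepts any *slog.Logger, and the "3LO token request"/"response" records only appear when the handler is enabled at debug level. A sketch of constructing such a logger to set on Options3LO.Logger; the logged attribute values are illustrative:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// A debug-level JSON handler; pass this as the Logger field when building
	// the 3LO options. With the default (nil) logger, nothing is emitted unless
	// GOOGLE_SDK_GO_LOGGING_LEVEL enables the SDK's own default logger.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))
	logger.Debug("3LO token request", "token_url", "https://example.com/token")
}
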
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index da7db19b1..bcfb5d816 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
+
+
+### Features
+
+* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
+
## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go
new file mode 100644
index 000000000..8ec673b88
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/log.go
@@ -0,0 +1,149 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+)
+
+// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog
+// to avoid the dependency. The compute/metadata module is used by too many
+// non-client library modules that can't justify the dependency.
+
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+ return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+ return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+ return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+ return h
+}
+
+// httpRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func httpRequest(req *http.Request, body []byte) slog.LogValuer {
+ return &request{
+ req: req,
+ payload: body,
+ }
+}
+
+type request struct {
+ req *http.Request
+ payload []byte
+}
+
+func (r *request) LogValue() slog.Value {
+ if r == nil || r.req == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method))
+ groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String()))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.req.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+// httpResponse returns a lazily evaluated [slog.LogValuer] for a
+// [http.Response] and the associated body.
+func httpResponse(resp *http.Response, body []byte) slog.LogValuer {
+ return &response{
+ resp: resp,
+ payload: body,
+ }
+}
+
+type response struct {
+ resp *http.Response
+ payload []byte
+}
+
+func (r *response) LogValue() slog.Value {
+ if r == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.resp.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+ peekChar := payload[0]
+ if peekChar == '{' {
+ // JSON object
+ var m map[string]any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else if peekChar == '[' {
+ // JSON array
+ var m []any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else {
+ // Everything else
+ buf := &bytes.Buffer{}
+ if err := json.Compact(buf, payload); err != nil {
+ // Write raw payload in case of error
+ buf.Write(payload)
+ }
+ return slog.String("payload", buf.String()), true
+ }
+ return slog.Attr{}, false
+}
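
The copied helpers lean on slog.LogValuer so request and response formatting is deferred until a handler is actually enabled, which is why a disabled logger costs almost nothing. A small demonstration of that laziness with a made-up payload type:

package main

import (
	"log/slog"
	"os"
)

type lazyPayload struct{ body []byte }

// LogValue is only invoked when a handler accepts the record's level.
func (p lazyPayload) LogValue() slog.Value {
	return slog.StringValue(string(p.body))
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
	// Dropped before LogValue runs, because the handler is not enabled for Debug.
	logger.Debug("metadata request", "payload", lazyPayload{[]byte(`{"k":"v"}`)})
	// Emitted, so LogValue is resolved here.
	logger.Info("metadata response", "payload", lazyPayload{[]byte(`{"ok":true}`)})
}
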
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index c160b4786..4c18a383a 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -24,6 +24,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net"
"net/http"
"net/url"
@@ -60,7 +61,10 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: newDefaultHTTPClient()}
+var defaultClient = &Client{
+ hc: newDefaultHTTPClient(),
+ logger: slog.New(noOpHandler{}),
+}
func newDefaultHTTPClient() *http.Client {
return &http.Client{
@@ -408,17 +412,42 @@ func strsContains(ss []string, s string) bool {
// A Client provides metadata.
type Client struct {
- hc *http.Client
+ hc *http.Client
+ logger *slog.Logger
+}
+
+// Options for configuring a [Client].
+type Options struct {
+ // Client is the HTTP client used to make requests. Optional.
+ Client *http.Client
+ // Logger is used to log information about HTTP requests and responses.
+ // If not provided, nothing will be logged. Optional.
+ Logger *slog.Logger
}
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
- if c == nil {
+ return NewWithOptions(&Options{
+ Client: c,
+ })
+}
+
+// NewWithOptions returns a Client that is configured with the provided Options.
+func NewWithOptions(opts *Options) *Client {
+ if opts == nil {
return defaultClient
}
- return &Client{hc: c}
+ client := opts.Client
+ if client == nil {
+ client = newDefaultHTTPClient()
+ }
+ logger := opts.Logger
+ if logger == nil {
+ logger = slog.New(noOpHandler{})
+ }
+ return &Client{hc: client, logger: logger}
}
// getETag returns a value from the metadata service as well as the associated ETag.
@@ -448,12 +477,21 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
req.Header.Set("User-Agent", userAgent)
var res *http.Response
var reqErr error
+ var body []byte
retryer := newRetryer()
for {
+ c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
res, reqErr = c.hc.Do(req)
var code int
if res != nil {
code = res.StatusCode
+ body, err = io.ReadAll(res.Body)
+ if err != nil {
+ res.Body.Close()
+ return "", "", err
+ }
+ c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
+ res.Body.Close()
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
if res != nil && res.Body != nil {
@@ -469,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if reqErr != nil {
return "", "", reqErr
}
- defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := io.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
if res.StatusCode != 200 {
- return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ return "", "", &Error{Code: res.StatusCode, Message: string(body)}
}
- return string(all), res.Header.Get("Etag"), nil
+ return string(body), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
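
NewWithOptions is the new entry point for attaching a logger (and optionally a custom HTTP client) to the metadata client; passing nil still yields the silent default. A usage sketch, which only returns real data when run on GCE:

package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	client := metadata.NewWithOptions(&metadata.Options{
		Logger: slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})),
	})
	// Off-GCE this lookup is expected to fail; on GCE the request and response
	// are logged at debug level by the configured logger.
	zone, err := client.GetWithContext(context.Background(), "instance/zone")
	fmt.Println(zone, err)
}
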
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b78..33c88305c 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c9..78bddf1ce 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
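
The xxhash update adds seeded digests: NewWithSeed(0) matches the plain Sum64 path, while other seeds produce different results for the same input. A quick illustration:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("node-problem-detector")

	d0 := xxhash.NewWithSeed(0)
	d0.Write(data)
	d1 := xxhash.NewWithSeed(42)
	d1.Write(data)

	fmt.Println(d0.Sum64() == xxhash.Sum64(data)) // true: zero seed equals Sum64
	fmt.Println(d0.Sum64() == d1.Sum64())         // false: different seeds diverge
}
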
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a40..78f95f256 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13bba..118e49e81 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5fd..05f5e7dfe 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638fd8..cf9d42aed 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 000000000..22f8d21cc
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff..0108f1d57 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853de..d970c7cf4 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+ notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -363,6 +382,128 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
+ case '{':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err = drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
@@ -377,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
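Besides the `interface{}` → `any` migration, the hunks above add `Pointer.Offset`, which resolves a pointer against the raw JSON text and reports the byte offset of the referenced token. A minimal sketch, assuming the vendored `github.com/go-openapi/jsonpointer` API shown above; the document and pointer values are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	raw := `{"items": ["alpha", "beta"]}`

	p, err := jsonpointer.New("/items/1")
	if err != nil {
		log.Fatal(err)
	}

	// Get traverses a decoded document (maps, slices, structs).
	var doc any
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		log.Fatal(err)
	}
	v, kind, err := p.Get(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v, kind) // expected: beta string

	// Offset works on the raw document text and returns the byte offset
	// of the referenced token inside it.
	off, err := p.Offset(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(off)
}
```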
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
index d69b53acc..c4b1b64f0 100644
--- a/vendor/github.com/go-openapi/swag/.gitignore
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e400..80e2be004 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 000000000..e7f28ed6b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+~ x10 performance improvement and ~ /100 memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
index 217f6fa50..a72922299 100644
--- a/vendor/github.com/go-openapi/swag/README.md
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 000000000..20a359bb6
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
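This new file consolidates the initialism index previously split across `util.go`, `post_go19.go` and `pre_go19.go`, and pre-bakes the rune and upper-cased variants used by the splitter. `AddInitialisms` remains the public extension hook; a minimal sketch of its effect on name mangling, with indicative outputs in the comments (assumed, not verified against this exact version):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Without registration, "oidc" is treated as a regular word.
	fmt.Println(swag.ToGoName("oidc_client")) // e.g. OidcClient

	// Register an extra initialism; subsequent name mangling keeps it
	// fully upper-cased.
	swag.AddInitialisms("OIDC")
	fmt.Println(swag.ToGoName("oidc_client")) // e.g. OIDCClient
}
```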
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 00038c377..783442fdd 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file" becomes "/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
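The rewritten `LoadStrategy` spells out how `file://` URIs are treated (prefix stripped on unix; UNC and drive-letter handling on windows). A minimal sketch of the exported entry points, assuming the vendored `github.com/go-openapi/swag` package; all paths below are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// Anything starting with "http" is fetched remotely; everything else is
	// read from disk. On unix a "file://" prefix is simply stripped before
	// the path reaches the local loader.
	for _, pth := range []string{
		"./spec/openapi.yaml",      // placeholder relative path
		"file:///tmp/openapi.yaml", // placeholder file URI
	} {
		b, err := swag.LoadFromFileOrHTTP(pth)
		if err != nil {
			log.Printf("load %s: %v", pth, err)
			continue
		}
		fmt.Printf("%s: %d bytes\n", pth, len(b))
	}

	// LoadStrategy exposes the same selection logic directly: for a local
	// path it returns a loader wrapping the provided local reader.
	load := swag.LoadStrategy("./spec/openapi.yaml", os.ReadFile, nil)
	if b, err := load("./spec/openapi.yaml"); err == nil {
		fmt.Printf("loaded %d bytes via LoadStrategy\n", len(b))
	}
}
```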
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb..8bb64ac32 100644
--- a/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c08..000000000
--- a/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377..000000000
--- a/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
index a1825fb7d..274727a86 100644
--- a/vendor/github.com/go-openapi/swag/split.go
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of o(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 000000000..90745d5ca
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index d971fbe34..5051401c4 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading whitespaces
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
- result := ""
- for _, lexem := range lexems {
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -343,7 +323,7 @@ type zeroable interface {
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
@@ -356,7 +336,7 @@ func IsZero(data interface{}) bool {
}
// continue with slightly more complex reflection
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
-}
-
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string
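`ToGoName` now assembles its result in a pooled buffer and handles the first rune explicitly (ASCII fast path, unicode fallback, `prefixFunc` for names that do not start with a letter), which should stay behaviour-compatible with the removed rune-slice logic. A small sketch of the exported helpers, with indicative (assumed) outputs in the comments:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("http_server_id")) // e.g. HTTPServerID
	fmt.Println(swag.ToGoName("123_rate_limit")) // e.g. X123RateLimit (default "X" prefix)

	// A custom prefix for names that do not start with a letter; per the
	// updated doc comment it is assumed to return a string starting with
	// an upper-case letter.
	swag.GoNamePrefixFunc = func(string) string { return "Nr" }
	fmt.Println(swag.ToGoName("123_rate_limit")) // e.g. Nr123RateLimit

	fmt.Println(swag.ToHumanNameLower("HTTPServerID")) // e.g. HTTP server ID
}
```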
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f..f59e02593 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
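
Note: beyond the lint-driven tweaks, the yaml.go changes make json2yaml deterministic (map keys are sorted before being emitted), map nil values to YAML null, and return an explicit error for unhandled types instead of nil, nil. The exported entry points are unchanged; a small usage sketch (the exact JSON rendering is illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Only YAML documents whose root is a mapping are accepted.
	doc, err := swag.BytesToYAMLDoc([]byte("name: demo\nreplicas: 2\n"))
	if err != nil {
		panic(err)
	}
	js, err := swag.YAMLToJSON(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(js)) // {"name":"demo","replicas":2}

	// A sequence or scalar root is rejected with the error seen above.
	if _, err := swag.BytesToYAMLDoc([]byte("- a\n- b\n")); err != nil {
		fmt.Println("error:", err)
	}
}
```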
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
index d127d4362..def01a6be 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -19,6 +19,7 @@ const (
tbFunc // func(T) bool
ttbFunc // func(T, T) bool
+ ttiFunc // func(T, T) int
trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
@@ -28,11 +29,13 @@ const (
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
+ Compare = ttiFunc // func(T, T) int
ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
@@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
+ case ttiFunc: // func(T, T) int
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+ return true
+ }
case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType {
return true
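
Note: go-cmp v0.7.0 teaches the internal function matcher to recognize three-way comparison functions (func(T, T) int) alongside less functions. As far as I can tell this is what lets the cmpopts sorting helpers accept a comparator such as the standard library's cmp.Compare; a sketch under that assumption:

```go
package main

import (
	stdcmp "cmp"
	"fmt"

	gocmp "github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := []int{3, 1, 2}
	y := []int{1, 2, 3}
	// SortSlices has traditionally taken a less function func(T, T) bool;
	// with the new Compare function type it should also accept func(T, T) int.
	opt := cmpopts.SortSlices(stdcmp.Compare[int])
	fmt.Println(gocmp.Equal(x, y, opt)) // true
}
```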
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index 754496f3b..ba3fce81f 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
- if _, ok := reflect.New(t).Interface().(error); ok {
+ isProtoMessage := func(t reflect.Type) bool {
+ m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+ return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+ m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+ m.Type.Out(0).Name() == "Message"
+ }
+ if isProtoMessage(t) {
+ help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+ } else if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
index 548f31da2..f47c77a2b 100644
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
+++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
@@ -297,6 +297,8 @@ const (
ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4
// Internal use only.
ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5
+ // Internal use only.
+ ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6
)
// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode.
@@ -308,6 +310,7 @@ var (
3: "RESERVED_CUSTOM_VERIFICATION_MODE_3",
4: "RESERVED_CUSTOM_VERIFICATION_MODE_4",
5: "RESERVED_CUSTOM_VERIFICATION_MODE_5",
+ 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6",
}
ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{
"UNSPECIFIED": 0,
@@ -316,6 +319,7 @@ var (
"RESERVED_CUSTOM_VERIFICATION_MODE_3": 3,
"RESERVED_CUSTOM_VERIFICATION_MODE_4": 4,
"RESERVED_CUSTOM_VERIFICATION_MODE_5": 5,
+ "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6,
}
)
@@ -1978,8 +1982,8 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{
0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4,
- 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d,
+ 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43,
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52,
0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e,
@@ -2013,7 +2017,7 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{
0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73,
0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50,
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49,
0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54,
@@ -2025,141 +2029,143 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{
0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27,
0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f,
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
- 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f,
- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65,
- 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
- 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65,
- 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
- 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
- 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a,
- 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32,
- 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
- 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68,
- 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61,
- 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65,
- 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69,
- 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19,
- 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12,
- 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61,
- 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61,
- 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f,
- 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66,
- 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65,
- 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65,
- 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f,
+ 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52,
+ 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46,
+ 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06,
+ 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2,
+ 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52,
+ 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f,
+ 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52,
+ 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32,
+ 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50,
+ 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43,
+ 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52,
+ 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74,
+ 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61,
+ 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72,
+ 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00,
+ 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74,
+ 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61,
+ 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65,
+ 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
+ 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32,
+ 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71,
+ 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72,
+ 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
+ 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65,
+ 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48,
+ 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66,
+ 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69,
+ 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50,
+ 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f,
+ 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f,
0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72,
- 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a,
+ 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e,
+ 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32,
+ 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65,
- 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04,
- 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67,
- 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69,
- 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66,
- 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f,
- 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
- 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01,
- 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66,
- 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
- 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00,
- 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76,
- 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65,
- 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65,
- 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f,
- 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53,
- 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50,
- 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41,
- 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b,
- 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d,
- 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41,
- 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12,
- 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
- 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32,
- 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49,
- 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36,
- 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53,
+ 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42,
+ 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03,
+ 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72,
+ 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f,
+ 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49,
+ 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41,
+ 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c,
+ 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f,
+ 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f,
+ 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43,
+ 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53,
0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53,
- 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33,
- 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f,
- 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35,
- 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a,
- 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53,
- 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35,
- 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53,
- 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45,
- 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41,
- 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53,
- 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12,
- 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
- 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70,
- 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32,
- 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01,
- 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32,
- 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
+ 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32,
+ 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f,
+ 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33,
+ 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a,
+ 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43,
+ 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48,
+ 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53,
+ 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52,
+ 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20,
+ 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41,
+ 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34,
+ 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49,
+ 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f,
+ 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f,
+ 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39,
+ 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e,
+ 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
index a6402ee48..0cc78547e 100644
--- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
+++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
@@ -64,13 +64,13 @@ type s2av2TransportCreds struct {
localIdentities []*commonpb.Identity
verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode
fallbackClientHandshake fallback.ClientHandshake
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
+ getS2AStream stream.GetS2AStream
serverAuthorizationPolicy []byte
}
// NewClientCreds returns a client-side transport credentials object that uses
// the S2Av2 to establish a secure connection with a server.
-func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) {
+func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) {
// Create an AccessTokenManager instance to use to authenticate to S2Av2.
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
@@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre
// NewServerCreds returns a server-side transport credentials object that uses
// the S2Av2 to establish a secure connection with a client.
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) {
+func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) {
// Create an AccessTokenManager instance to use to authenticate to S2Av2.
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
creds := &s2av2TransportCreds{
@@ -306,8 +306,9 @@ func NewClientTLSConfig(
tokenManager tokenmanager.AccessTokenManager,
verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode,
serverName string,
- serverAuthorizationPolicy []byte) (*tls.Config, error) {
- s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil)
+ serverAuthorizationPolicy []byte,
+ getStream stream.GetS2AStream) (*tls.Config, error) {
+ s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream)
if err != nil {
grpclog.Infof("Failed to connect to S2Av2: %v", err)
return nil, err
@@ -350,7 +351,7 @@ func (x s2AGrpcStream) CloseSend() error {
return x.stream.CloseSend()
}
-func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) {
+func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) {
if getS2AStream != nil {
return getS2AStream(ctx, s2av2Address)
}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
index fa0002e36..6ca75f560 100644
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
+++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
@@ -75,7 +75,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr
return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
}
- // Extract TLS configiguration from SessionResp.
+ // Extract TLS configuration from SessionResp.
tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration()
var cert tls.Certificate
diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go
index cc79bd09a..c52fccddf 100644
--- a/vendor/github.com/google/s2a-go/s2a.go
+++ b/vendor/github.com/google/s2a-go/s2a.go
@@ -35,6 +35,7 @@ import (
"github.com/google/s2a-go/internal/tokenmanager"
"github.com/google/s2a-go/internal/v2"
"github.com/google/s2a-go/retry"
+ "github.com/google/s2a-go/stream"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/proto"
@@ -330,6 +331,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err
tokenManager: nil,
verificationMode: getVerificationMode(opts.VerificationMode),
serverAuthorizationPolicy: opts.serverAuthorizationPolicy,
+ getStream: opts.getS2AStream,
}, nil
}
return &s2aTLSClientConfigFactory{
@@ -338,6 +340,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err
tokenManager: tokenManager,
verificationMode: getVerificationMode(opts.VerificationMode),
serverAuthorizationPolicy: opts.serverAuthorizationPolicy,
+ getStream: opts.getS2AStream,
}, nil
}
@@ -347,6 +350,7 @@ type s2aTLSClientConfigFactory struct {
tokenManager tokenmanager.AccessTokenManager
verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode
serverAuthorizationPolicy []byte
+ getStream stream.GetS2AStream
}
func (f *s2aTLSClientConfigFactory) Build(
@@ -355,7 +359,7 @@ func (f *s2aTLSClientConfigFactory) Build(
if opts != nil && opts.ServerName != "" {
serverName = opts.ServerName
}
- return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy)
+ return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream)
}
func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode {
@@ -370,6 +374,8 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate
return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4
case ReservedCustomVerificationMode5:
return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5
+ case ReservedCustomVerificationMode6:
+ return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6
default:
return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED
}
diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go
index 5bbf31bf4..b7a277f9e 100644
--- a/vendor/github.com/google/s2a-go/s2a_options.go
+++ b/vendor/github.com/google/s2a-go/s2a_options.go
@@ -19,7 +19,6 @@
package s2a
import (
- "context"
"crypto/tls"
"errors"
"sync"
@@ -28,7 +27,7 @@ import (
"github.com/google/s2a-go/stream"
"google.golang.org/grpc/credentials"
- s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto"
+ s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto"
s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto"
)
@@ -36,6 +35,17 @@ import (
type Identity interface {
// Name returns the name of the identity.
Name() string
+ Attributes() map[string]string
+}
+
+type UnspecifiedID struct {
+ Attr map[string]string
+}
+
+func (u *UnspecifiedID) Name() string { return "" }
+
+func (u *UnspecifiedID) Attributes() map[string]string {
+ return u.Attr
}
type spiffeID struct {
@@ -44,10 +54,10 @@ type spiffeID struct {
func (s *spiffeID) Name() string { return s.spiffeID }
+func (spiffeID) Attributes() map[string]string { return nil }
+
// NewSpiffeID creates a SPIFFE ID from id.
-func NewSpiffeID(id string) Identity {
- return &spiffeID{spiffeID: id}
-}
+func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} }
type hostname struct {
hostname string
@@ -55,10 +65,10 @@ type hostname struct {
func (h *hostname) Name() string { return h.hostname }
+func (hostname) Attributes() map[string]string { return nil }
+
// NewHostname creates a hostname from name.
-func NewHostname(name string) Identity {
- return &hostname{hostname: name}
-}
+func NewHostname(name string) Identity { return &hostname{hostname: name} }
type uid struct {
uid string
@@ -66,10 +76,10 @@ type uid struct {
func (h *uid) Name() string { return h.uid }
+func (uid) Attributes() map[string]string { return nil }
+
// NewUID creates a UID from name.
-func NewUID(name string) Identity {
- return &uid{uid: name}
-}
+func NewUID(name string) Identity { return &uid{uid: name} }
// VerificationModeType specifies the mode that S2A must use to verify the peer
// certificate chain.
@@ -83,6 +93,7 @@ const (
ReservedCustomVerificationMode3
ReservedCustomVerificationMode4
ReservedCustomVerificationMode5
+ ReservedCustomVerificationMode6
)
// ClientOptions contains the client-side options used to establish a secure
@@ -137,7 +148,7 @@ type ClientOptions struct {
FallbackOpts *FallbackOptions
// Generates an S2AStream interface for talking to the S2A server.
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
+ getS2AStream stream.GetS2AStream
// Serialized user specified policy for server authorization.
serverAuthorizationPolicy []byte
@@ -191,7 +202,7 @@ type ServerOptions struct {
VerificationMode VerificationModeType
// Generates an S2AStream interface for talking to the S2A server.
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
+ getS2AStream stream.GetS2AStream
}
// DefaultServerOptions returns the default server options.
@@ -202,17 +213,30 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions {
}
}
-func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) {
+func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) {
if identity == nil {
return nil, nil
}
switch id := identity.(type) {
case *spiffeID:
- return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil
+ return &s2av1pb.Identity{
+ IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
case *hostname:
- return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil
+ return &s2av1pb.Identity{
+ IdentityOneof: &s2av1pb.Identity_Hostname{Hostname: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
case *uid:
- return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil
+ return &s2av1pb.Identity{
+ IdentityOneof: &s2av1pb.Identity_Uid{Uid: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
+ case *UnspecifiedID:
+ return &s2av1pb.Identity{
+ Attributes: id.Attributes(),
+ }, nil
default:
return nil, errors.New("unrecognized identity type")
}
@@ -224,11 +248,24 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) {
}
switch id := identity.(type) {
case *spiffeID:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil
+ return &s2apb.Identity{
+ IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
case *hostname:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil
+ return &s2apb.Identity{
+ IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
case *uid:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil
+ return &s2apb.Identity{
+ IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()},
+ Attributes: id.Attributes(),
+ }, nil
+ case *UnspecifiedID:
+ return &s2apb.Identity{
+ Attributes: id.Attributes(),
+ }, nil
default:
return nil, errors.New("unrecognized identity type")
}
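
Note: the s2a_options.go changes extend the Identity interface with Attributes() and add an exported UnspecifiedID that carries only attributes; both are now propagated into the v1 and v2 proto identities. A short sketch of the new surface:

```go
package main

import (
	"fmt"

	s2a "github.com/google/s2a-go"
)

func main() {
	// Existing constructors keep working; their identities report no attributes.
	spiffe := s2a.NewSpiffeID("spiffe://example.org/workload")
	fmt.Println(spiffe.Name(), spiffe.Attributes()) // spiffe://example.org/workload map[]

	// UnspecifiedID has an empty name and exists purely to carry attributes.
	id := &s2a.UnspecifiedID{Attr: map[string]string{"pool": "default"}}
	fmt.Println(id.Name() == "", id.Attributes()["pool"]) // true default
}
```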
diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go
index 584bf32b1..ae2d5eb4c 100644
--- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go
+++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go
@@ -20,6 +20,8 @@
package stream
import (
+ "context"
+
s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
)
@@ -32,3 +34,6 @@ type S2AStream interface {
// Closes the channel to the S2A server.
CloseSend() error
}
+
+// GetS2AStream type is for generating an S2AStream interface for talking to the S2A server.
+type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error)
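
Note: the new stream.GetS2AStream type replaces the inline func(ctx, address) signature used throughout the files above and gains a variadic opts parameter. Anything satisfying it can serve as a custom transport factory for the client/server options (the dialS2A helper below is hypothetical; a real one would return a value implementing S2AStream):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/s2a-go/stream"
)

// dialS2A is a hypothetical factory matching stream.GetS2AStream. A real
// implementation would open a connection to s2av2Address and return a value
// implementing stream.S2AStream (Send/Recv/CloseSend).
func dialS2A(ctx context.Context, s2av2Address string, opts ...string) (stream.S2AStream, error) {
	return nil, fmt.Errorf("custom transport to %s not implemented", s2av2Address)
}

// Compile-time check that dialS2A matches the new function type,
// including the trailing variadic opts.
var _ stream.GetS2AStream = dialS2A

func main() {
	if _, err := dialS2A(context.Background(), "localhost:8080"); err != nil {
		fmt.Println(err)
	}
}
```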
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
index 44d4d0020..a8c082dd6 100644
--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
+++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- "v2": "2.13.0"
+ "v2": "2.14.1"
}
diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
index d63421b71..17cced15e 100644
--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
+++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
@@ -1,5 +1,24 @@
# Changelog
+## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19)
+
+
+### Bug Fixes
+
+* update golang.org/x/net to v0.33.0 ([#391](https://github.com/googleapis/gax-go/issues/391)) ([547a5b4](https://github.com/googleapis/gax-go/commit/547a5b43aa6f376f71242da9f18e65fbdfb342f6))
+
+
+### Documentation
+
+* fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd))
+
+## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13)
+
+
+### Features
+
+* **internallog:** add a logging support package ([#380](https://github.com/googleapis/gax-go/issues/380)) ([c877470](https://github.com/googleapis/gax-go/commit/c87747098135631a3de5865ed03aaf2c79fd9319))
+
## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22)
diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
index e12421cf5..2b284a24a 100644
--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go
+++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
@@ -30,4 +30,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "2.13.0"
+const Version = "2.14.1"
diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go
new file mode 100644
index 000000000..19f4be35c
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go
@@ -0,0 +1,134 @@
+// Copyright 2024, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package internal provides some common logic and types to other logging
+// sub-packages.
+package internal
+
+import (
+ "context"
+ "io"
+ "log/slog"
+ "os"
+ "strings"
+ "time"
+)
+
+const (
+ // LoggingLevelEnvVar is the environment variable used to enable logging
+ // at a particular level.
+ LoggingLevelEnvVar = "GOOGLE_SDK_GO_LOGGING_LEVEL"
+
+ googLvlKey = "severity"
+ googMsgKey = "message"
+ googSourceKey = "sourceLocation"
+ googTimeKey = "timestamp"
+)
+
+// NewLoggerWithWriter is exposed for testing.
+func NewLoggerWithWriter(w io.Writer) *slog.Logger {
+ lvl, loggingEnabled := checkLoggingLevel()
+ if !loggingEnabled {
+ return slog.New(noOpHandler{})
+ }
+ return slog.New(newGCPSlogHandler(lvl, w))
+}
+
+// checkLoggingLevel returned the configured logging level and whether or not
+// logging is enabled.
+func checkLoggingLevel() (slog.Leveler, bool) {
+ sLevel := strings.ToLower(os.Getenv(LoggingLevelEnvVar))
+ var level slog.Level
+ switch sLevel {
+ case "debug":
+ level = slog.LevelDebug
+ case "info":
+ level = slog.LevelInfo
+ case "warn":
+ level = slog.LevelWarn
+ case "error":
+ level = slog.LevelError
+ default:
+ return nil, false
+ }
+ return level, true
+}
+
+// newGCPSlogHandler returns a Handler that is configured to output in a JSON
+// format with well-known keys. For more information on this format see
+// https://cloud.google.com/logging/docs/agent/logging/configuration#special-fields.
+func newGCPSlogHandler(lvl slog.Leveler, w io.Writer) slog.Handler {
+ return slog.NewJSONHandler(w, &slog.HandlerOptions{
+ Level: lvl,
+ ReplaceAttr: replaceAttr,
+ })
+}
+
+// replaceAttr remaps default Go logging keys to match what is expected in
+// cloud logging.
+func replaceAttr(groups []string, a slog.Attr) slog.Attr {
+ if groups == nil {
+ if a.Key == slog.LevelKey {
+ a.Key = googLvlKey
+ return a
+ } else if a.Key == slog.MessageKey {
+ a.Key = googMsgKey
+ return a
+ } else if a.Key == slog.SourceKey {
+ a.Key = googSourceKey
+ return a
+ } else if a.Key == slog.TimeKey {
+ a.Key = googTimeKey
+ if a.Value.Kind() == slog.KindTime {
+ a.Value = slog.StringValue(a.Value.Time().Format(time.RFC3339))
+ }
+ return a
+ }
+ }
+ return a
+}
+
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+ return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+ return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+ return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+ return h
+}
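
Note: the internal helper above gates all logging on GOOGLE_SDK_GO_LOGGING_LEVEL: an unset or unrecognized value yields a no-op handler, while debug/info/warn/error enable a JSON handler whose keys are remapped to Cloud Logging field names (severity, message, timestamp, and sourceLocation when sources are recorded). A quick sketch using the testing hook NewLoggerWithWriter:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/googleapis/gax-go/v2/internallog/internal"
)

func main() {
	var buf bytes.Buffer

	// With the variable unset, records are dropped by the no-op handler.
	os.Unsetenv(internal.LoggingLevelEnvVar)
	internal.NewLoggerWithWriter(&buf).Info("dropped")
	fmt.Println(buf.Len() == 0) // true

	// A recognized level turns on JSON output at that level.
	os.Setenv(internal.LoggingLevelEnvVar, "debug")
	internal.NewLoggerWithWriter(&buf).Debug("enabled")
	fmt.Println(buf.Len() > 0) // true
}
```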
diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go
new file mode 100644
index 000000000..e47ab32ac
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go
@@ -0,0 +1,154 @@
+// Copyright 2024, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package internallog is intended for internal use by generated clients only.
+package internallog
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/googleapis/gax-go/v2/internallog/internal"
+)
+
+// New returns a new default [slog.Logger], or the provided logger if
+// non-nil. The returned logger will be a no-op logger unless the environment
+// variable GOOGLE_SDK_GO_LOGGING_LEVEL is set.
+func New(l *slog.Logger) *slog.Logger {
+ if l != nil {
+ return l
+ }
+ return internal.NewLoggerWithWriter(os.Stderr)
+}
+
+// HTTPRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func HTTPRequest(req *http.Request, body []byte) slog.LogValuer {
+ return &request{
+ req: req,
+ payload: body,
+ }
+}
+
+type request struct {
+ req *http.Request
+ payload []byte
+}
+
+func (r *request) LogValue() slog.Value {
+ if r == nil || r.req == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method))
+ groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String()))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.req.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+// HTTPResponse returns a lazily evaluated [slog.LogValuer] for a
+// [http.Response] and the associated body.
+func HTTPResponse(resp *http.Response, body []byte) slog.LogValuer {
+ return &response{
+ resp: resp,
+ payload: body,
+ }
+}
+
+type response struct {
+ resp *http.Response
+ payload []byte
+}
+
+func (r *response) LogValue() slog.Value {
+	if r == nil || r.resp == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.resp.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+ peekChar := payload[0]
+ if peekChar == '{' {
+ // JSON object
+ var m map[string]any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else if peekChar == '[' {
+ // JSON array
+ var m []any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else {
+ // Everything else
+ buf := &bytes.Buffer{}
+ if err := json.Compact(buf, payload); err != nil {
+		// Write the raw payload in case of error
+ buf.Write(payload)
+ }
+ return slog.String("payload", buf.String()), true
+ }
+ return slog.Attr{}, false
+}
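internallog.HTTPRequest and internallog.HTTPResponse wrap a request or response plus its body in a slog.LogValuer, so headers and JSON payloads are only expanded into attributes when a record is actually handled. Below is a minimal usage sketch; the URL, payload, and logging setup are made up for illustration, and in the generated clients the logger normally comes from internallog.New, which is a no-op unless GOOGLE_SDK_GO_LOGGING_LEVEL is set.

```go
package main

import (
	"log/slog"
	"net/http"
	"os"
	"strings"

	"github.com/googleapis/gax-go/v2/internallog"
)

func main() {
	// A plain debug-level JSON logger, just for this sketch.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	body := []byte(`{"name": "projects/demo"}`)
	req, err := http.NewRequest(http.MethodPost,
		"https://example.googleapis.com/v1/items", strings.NewReader(string(body)))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	// The LogValuer is only resolved if the record is actually emitted,
	// so constructing it is cheap when debug logging is disabled.
	// HTTPResponse(resp, respBody) is used the same way on the reply side.
	logger.Debug("sending request", "request", internallog.HTTPRequest(req, body))
}
```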
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
deleted file mode 100644
index 8a0681af8..000000000
--- a/vendor/github.com/imdario/mergo/.deepsource.toml
+++ /dev/null
@@ -1,12 +0,0 @@
-version = 1
-
-test_patterns = [
- "*_test.go"
-]
-
-[[analyzers]]
-name = "go"
-enabled = true
-
- [analyzers.meta]
- import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
deleted file mode 100644
index 529c3412b..000000000
--- a/vendor/github.com/imdario/mergo/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-#### joe made this: http://goel.io/joe
-
-#### go ####
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-#### vim ####
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-v][a-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-
-# Temporary
-.netrwhist
-*~
-# Auto-generated tag files
-tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
deleted file mode 100644
index d324c43ba..000000000
--- a/vendor/github.com/imdario/mergo/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-install:
- - go get -t
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/mattn/goveralls
-script:
- - go test -race -v ./...
-after_script:
- - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
deleted file mode 100644
index 469b44907..000000000
--- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
deleted file mode 100644
index aa8cbd7ce..000000000
--- a/vendor/github.com/imdario/mergo/README.md
+++ /dev/null
@@ -1,247 +0,0 @@
-# Mergo
-
-
-[![GoDoc][3]][4]
-[![GitHub release][5]][6]
-[![GoCard][7]][8]
-[![Build Status][1]][2]
-[![Coverage Status][9]][10]
-[![Sourcegraph][11]][12]
-[![FOSSA Status][13]][14]
-
-[![GoCenter Kudos][15]][16]
-
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
-[3]: https://godoc.org/github.com/imdario/mergo?status.svg
-[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://img.shields.io/github/release/imdario/mergo.svg
-[6]: https://github.com/imdario/mergo/releases
-[7]: https://goreportcard.com/badge/imdario/mergo
-[8]: https://goreportcard.com/report/github.com/imdario/mergo
-[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[10]: https://coveralls.io/github/imdario/mergo?branch=master
-[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
-[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
-[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
-[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
-[16]: https://search.gocenter.io/github.com/imdario/mergo
-
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
-
-### Important note
-
-Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
-
-Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
-
-If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
-
-### Donations
-
-If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
-
-
-[](https://beerpay.io/imdario/mergo)
-[](https://beerpay.io/imdario/mergo)
-
-
-### Mergo in the wild
-
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-
-## Install
-
- go get github.com/imdario/mergo
-
- // use in your .go code
- import (
- "github.com/imdario/mergo"
- )
-
-## Usage
-
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
-
-```go
-if err := mergo.Merge(&dst, src); err != nil {
- // ...
-}
-```
-
-Also, you can merge overwriting values using the transformer `WithOverride`.
-
-```go
-if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
- // ...
-}
-```
-
-Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
-
-```go
-if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
-}
-```
-
-Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
-
-Here is a nice example:
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
-)
-
-type Foo struct {
- A string
- B int64
-}
-
-func main() {
- src := Foo{
- A: "one",
- B: 2,
- }
- dest := Foo{
- A: "two",
- }
- mergo.Merge(&dest, src)
- fmt.Println(dest)
- // Will print
- // {two 2}
-}
-```
-
-Note: if test are failing due missing package, please execute:
-
- go get gopkg.in/yaml.v2
-
-### Transformers
-
-Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
- "reflect"
- "time"
-)
-
-type timeTransformer struct {
-}
-
-func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
- if typ == reflect.TypeOf(time.Time{}) {
- return func(dst, src reflect.Value) error {
- if dst.CanSet() {
- isZero := dst.MethodByName("IsZero")
- result := isZero.Call([]reflect.Value{})
- if result[0].Bool() {
- dst.Set(src)
- }
- }
- return nil
- }
- }
- return nil
-}
-
-type Snapshot struct {
- Time time.Time
- // ...
-}
-
-func main() {
- src := Snapshot{time.Now()}
- dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
- fmt.Println(dest)
- // Will print
- // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
-}
-```
-
-
-## Contact me
-
-If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
-
-## About
-
-Written by [Dario Castañé](http://dario.im).
-
-## Top Contributors
-
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
-[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
-
-
-## License
-
-[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
-
-
-[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
deleted file mode 100644
index fcd985f99..000000000
--- a/vendor/github.com/imdario/mergo/doc.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Status
-
-It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
-
-Important note
-
-Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules.
-
-Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
-
-If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
-
-Install
-
-Do your usual installation procedure:
-
- go get github.com/imdario/mergo
-
- // use in your .go code
- import (
- "github.com/imdario/mergo"
- )
-
-Usage
-
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
-
- if err := mergo.Merge(&dst, src); err != nil {
- // ...
- }
-
-Also, you can merge overwriting values using the transformer WithOverride.
-
- if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
- // ...
- }
-
-Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
-
- if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
- }
-
-Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
-
-Here is a nice example:
-
- package main
-
- import (
- "fmt"
- "github.com/imdario/mergo"
- )
-
- type Foo struct {
- A string
- B int64
- }
-
- func main() {
- src := Foo{
- A: "one",
- B: 2,
- }
- dest := Foo{
- A: "two",
- }
- mergo.Merge(&dest, src)
- fmt.Println(dest)
- // Will print
- // {two 2}
- }
-
-Transformers
-
-Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
-
- package main
-
- import (
- "fmt"
- "github.com/imdario/mergo"
- "reflect"
- "time"
- )
-
- type timeTransformer struct {
- }
-
- func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
- if typ == reflect.TypeOf(time.Time{}) {
- return func(dst, src reflect.Value) error {
- if dst.CanSet() {
- isZero := dst.MethodByName("IsZero")
- result := isZero.Call([]reflect.Value{})
- if result[0].Bool() {
- dst.Set(src)
- }
- }
- return nil
- }
- }
- return nil
- }
-
- type Snapshot struct {
- Time time.Time
- // ...
- }
-
- func main() {
- src := Snapshot{time.Now()}
- dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
- fmt.Println(dest)
- // Will print
- // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
- }
-
-Contact me
-
-If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
-
-About
-
-Written by Dario Castañé: https://da.rio.hn
-
-License
-
-BSD 3-Clause license, as Go language.
-
-*/
-package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
deleted file mode 100644
index a13a7ee46..000000000
--- a/vendor/github.com/imdario/mergo/map.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2014 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
- "unicode"
- "unicode/utf8"
-)
-
-func changeInitialCase(s string, mapper func(rune) rune) string {
- if s == "" {
- return s
- }
- r, n := utf8.DecodeRuneInString(s)
- return string(mapper(r)) + s[n:]
-}
-
-func isExported(field reflect.StructField) bool {
- r, _ := utf8.DecodeRuneInString(field.Name)
- return r >= 'A' && r <= 'Z'
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- zeroValue := reflect.Value{}
- switch dst.Kind() {
- case reflect.Map:
- dstMap := dst.Interface().(map[string]interface{})
- for i, n := 0, src.NumField(); i < n; i++ {
- srcType := src.Type()
- field := srcType.Field(i)
- if !isExported(field) {
- continue
- }
- fieldName := field.Name
- fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
- dstMap[fieldName] = src.Field(i).Interface()
- }
- }
- case reflect.Ptr:
- if dst.IsNil() {
- v := reflect.New(dst.Type().Elem())
- dst.Set(v)
- }
- dst = dst.Elem()
- fallthrough
- case reflect.Struct:
- srcMap := src.Interface().(map[string]interface{})
- for key := range srcMap {
- config.overwriteWithEmptyValue = true
- srcValue := srcMap[key]
- fieldName := changeInitialCase(key, unicode.ToUpper)
- dstElement := dst.FieldByName(fieldName)
- if dstElement == zeroValue {
- // We discard it because the field doesn't exist.
- continue
- }
- srcElement := reflect.ValueOf(srcValue)
- dstKind := dstElement.Kind()
- srcKind := srcElement.Kind()
- if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
- srcElement = srcElement.Elem()
- srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
- } else if dstKind == reflect.Ptr {
- // Can this work? I guess it can't.
- if srcKind != reflect.Ptr && srcElement.CanAddr() {
- srcPtr := srcElement.Addr()
- srcElement = reflect.ValueOf(srcPtr)
- srcKind = reflect.Ptr
- }
- }
-
- if !srcElement.IsValid() {
- continue
- }
- if srcKind == dstKind {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if srcKind == reflect.Map {
- if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
- }
- }
- }
- return
-}
-
-// Map sets fields' values in dst from src.
-// src can be a map with string keys or a struct. dst must be the opposite:
-// if src is a map, dst must be a valid pointer to struct. If src is a struct,
-// dst must be map[string]interface{}.
-// It won't merge unexported (private) fields and will do recursively
-// any exported field.
-// If dst is a map, keys will be src fields' names in lower camel case.
-// Missing key in src that doesn't match a field in dst will be skipped. This
-// doesn't apply if dst is a map.
-// This is separated method from Merge because it is cleaner and it keeps sane
-// semantics: merging equal types, mapping different (restricted) types.
-func Map(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, opts...)
-}
-
-// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: Use Map(…) with WithOverride
-func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, append(opts, WithOverride)...)
-}
-
-func _map(dst, src interface{}, opts ...func(*Config)) error {
- if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
- }
- var (
- vDst, vSrc reflect.Value
- err error
- )
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- // To be friction-less, we redirect equal-type arguments
- // to deepMerge. Only because arguments can be anything.
- if vSrc.Kind() == vDst.Kind() {
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
- }
- switch vSrc.Kind() {
- case reflect.Struct:
- if vDst.Kind() != reflect.Map {
- return ErrExpectedMapAsDestination
- }
- case reflect.Map:
- if vDst.Kind() != reflect.Struct {
- return ErrExpectedStructAsDestination
- }
- default:
- return ErrNotSupported
- }
- return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
deleted file mode 100644
index 8c2a8fcd9..000000000
--- a/vendor/github.com/imdario/mergo/merge.go
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
-)
-
-func hasMergeableFields(dst reflect.Value) (exported bool) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- field := dst.Type().Field(i)
- if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
- exported = exported || hasMergeableFields(dst.Field(i))
- } else if isExportedComponent(&field) {
- exported = exported || len(field.PkgPath) == 0
- }
- }
- return
-}
-
-func isExportedComponent(field *reflect.StructField) bool {
- pkgPath := field.PkgPath
- if len(pkgPath) > 0 {
- return false
- }
- c := field.Name[0]
- if 'a' <= c && c <= 'z' || c == '_' {
- return false
- }
- return true
-}
-
-type Config struct {
- Overwrite bool
- AppendSlice bool
- TypeCheck bool
- Transformers Transformers
- overwriteWithEmptyValue bool
- overwriteSliceWithEmptyValue bool
- sliceDeepCopy bool
- debug bool
-}
-
-type Transformers interface {
- Transformer(reflect.Type) func(dst, src reflect.Value) error
-}
-
-// Traverses recursively both values, assigning src's fields values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
- typeCheck := config.TypeCheck
- overwriteWithEmptySrc := config.overwriteWithEmptyValue
- overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
- sliceDeepCopy := config.sliceDeepCopy
-
- if !src.IsValid() {
- return
- }
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
-
- if config.Transformers != nil && !isEmptyValue(dst) {
- if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
- err = fn(dst, src)
- return
- }
- }
-
- switch dst.Kind() {
- case reflect.Struct:
- if hasMergeableFields(dst) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
- return
- }
- }
- } else {
- if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
- dst.Set(src)
- }
- }
- case reflect.Map:
- if dst.IsNil() && !src.IsNil() {
- if dst.CanSet() {
- dst.Set(reflect.MakeMap(dst.Type()))
- } else {
- dst = src
- return
- }
- }
-
- if src.Kind() != reflect.Map {
- if overwrite {
- dst.Set(src)
- }
- return
- }
-
- for _, key := range src.MapKeys() {
- srcElement := src.MapIndex(key)
- if !srcElement.IsValid() {
- continue
- }
- dstElement := dst.MapIndex(key)
- switch srcElement.Kind() {
- case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
- if srcElement.IsNil() {
- if overwrite {
- dst.SetMapIndex(key, srcElement)
- }
- continue
- }
- fallthrough
- default:
- if !srcElement.CanInterface() {
- continue
- }
- switch reflect.TypeOf(srcElement.Interface()).Kind() {
- case reflect.Struct:
- fallthrough
- case reflect.Ptr:
- fallthrough
- case reflect.Map:
- srcMapElm := srcElement
- dstMapElm := dstElement
- if srcMapElm.CanInterface() {
- srcMapElm = reflect.ValueOf(srcMapElm.Interface())
- if dstMapElm.IsValid() {
- dstMapElm = reflect.ValueOf(dstMapElm.Interface())
- }
- }
- if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
- return
- }
- case reflect.Slice:
- srcSlice := reflect.ValueOf(srcElement.Interface())
-
- var dstSlice reflect.Value
- if !dstElement.IsValid() || dstElement.IsNil() {
- dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
- } else {
- dstSlice = reflect.ValueOf(dstElement.Interface())
- }
-
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
- if typeCheck && srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
- }
- dstSlice = srcSlice
- } else if config.AppendSlice {
- if srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
- }
- dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
- } else if sliceDeepCopy {
- i := 0
- for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
- srcElement := srcSlice.Index(i)
- dstElement := dstSlice.Index(i)
-
- if srcElement.CanInterface() {
- srcElement = reflect.ValueOf(srcElement.Interface())
- }
- if dstElement.CanInterface() {
- dstElement = reflect.ValueOf(dstElement.Interface())
- }
-
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- }
-
- }
- dst.SetMapIndex(key, dstSlice)
- }
- }
- if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
- continue
- }
-
- if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- dst.SetMapIndex(key, srcElement)
- }
- }
- case reflect.Slice:
- if !dst.CanSet() {
- break
- }
- if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
- dst.Set(src)
- } else if config.AppendSlice {
- if src.Type() != dst.Type() {
- return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
- }
- dst.Set(reflect.AppendSlice(dst, src))
- } else if sliceDeepCopy {
- for i := 0; i < src.Len() && i < dst.Len(); i++ {
- srcElement := src.Index(i)
- dstElement := dst.Index(i)
- if srcElement.CanInterface() {
- srcElement = reflect.ValueOf(srcElement.Interface())
- }
- if dstElement.CanInterface() {
- dstElement = reflect.ValueOf(dstElement.Interface())
- }
-
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- }
- }
- case reflect.Ptr:
- fallthrough
- case reflect.Interface:
- if isReflectNil(src) {
- if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
- dst.Set(src)
- }
- break
- }
-
- if src.Kind() != reflect.Interface {
- if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- } else if src.Kind() == reflect.Ptr {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- } else if dst.Elem().Type() == src.Type() {
- if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return ErrDifferentArgumentsTypes
- }
- break
- }
-
- if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- break
- }
-
- if dst.Elem().Kind() == src.Elem().Kind() {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- break
- }
- default:
- mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
- if mustSet {
- if dst.CanSet() {
- dst.Set(src)
- } else {
- dst = src
- }
- }
- }
-
- return
-}
-
-// Merge will fill any empty for value type attributes on the dst struct using corresponding
-// src attributes if they themselves are not empty. dst and src must be valid same-type structs
-// and dst must be a pointer to struct.
-// It won't merge unexported (private) fields and will do recursively any exported field.
-func Merge(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, opts...)
-}
-
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: use Merge(…) with WithOverride
-func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, append(opts, WithOverride)...)
-}
-
-// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
-func WithTransformers(transformers Transformers) func(*Config) {
- return func(config *Config) {
- config.Transformers = transformers
- }
-}
-
-// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
-func WithOverride(config *Config) {
- config.Overwrite = true
-}
-
-// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
-func WithOverwriteWithEmptyValue(config *Config) {
- config.Overwrite = true
- config.overwriteWithEmptyValue = true
-}
-
-// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
-func WithOverrideEmptySlice(config *Config) {
- config.overwriteSliceWithEmptyValue = true
-}
-
-// WithAppendSlice will make merge append slices instead of overwriting it.
-func WithAppendSlice(config *Config) {
- config.AppendSlice = true
-}
-
-// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
-func WithTypeCheck(config *Config) {
- config.TypeCheck = true
-}
-
-// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
-func WithSliceDeepCopy(config *Config) {
- config.sliceDeepCopy = true
- config.Overwrite = true
-}
-
-func merge(dst, src interface{}, opts ...func(*Config)) error {
- if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
- return ErrNonPointerAgument
- }
- var (
- vDst, vSrc reflect.Value
- err error
- )
-
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- if vDst.Type() != vSrc.Type() {
- return ErrDifferentArgumentsTypes
- }
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
-
-// IsReflectNil is the reflect value provided nil
-func isReflectNil(v reflect.Value) bool {
- k := v.Kind()
- switch k {
- case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
- // Both interface and slice are nil if first word is 0.
- // Both are always bigger than a word; assume flagIndir.
- return v.IsNil()
- default:
- return false
- }
-}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
deleted file mode 100644
index 3cc926c7f..000000000
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "errors"
- "reflect"
-)
-
-// Errors reported by Mergo when it finds invalid arguments.
-var (
- ErrNilArguments = errors.New("src and dst must not be nil")
- ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
- ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
- ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
- ErrNonPointerAgument = errors.New("dst must be a pointer")
-)
-
-// During deepMerge, must keep track of checks that are
-// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it reencounters them.
-// Visited are stored in a map indexed by 17 * a1 + a2;
-type visit struct {
- ptr uintptr
- typ reflect.Type
- next *visit
-}
-
-// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- if v.IsNil() {
- return true
- }
- return isEmptyValue(v.Elem())
- case reflect.Func:
- return v.IsNil()
- case reflect.Invalid:
- return true
- }
- return false
-}
-
-func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
- if dst == nil || src == nil {
- err = ErrNilArguments
- return
- }
- vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
- err = ErrNotSupported
- return
- }
- vSrc = reflect.ValueOf(src)
- // We check if vSrc is a pointer to dereference it.
- if vSrc.Kind() == reflect.Ptr {
- vSrc = vSrc.Elem()
- }
- return
-}
diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes
new file mode 100644
index 000000000..402433593
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitattributes
@@ -0,0 +1,2 @@
+* -text
+*.bin -text -diff
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
new file mode 100644
index 000000000..d31b37815
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+/s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
new file mode 100644
index 000000000..a22953805
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -0,0 +1,123 @@
+# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com
+before:
+ hooks:
+ - ./gen.sh
+
+builds:
+ -
+ id: "s2c"
+ binary: s2c
+ main: ./s2/cmd/s2c/main.go
+ flags:
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+ -
+ id: "s2d"
+ binary: s2d
+ main: ./s2/cmd/s2d/main.go
+ flags:
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+ -
+ id: "s2sx"
+ binary: s2sx
+ main: ./s2/cmd/_s2sx/main.go
+ flags:
+ - -modfile=s2sx.mod
+ - -trimpath
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - aix
+ - linux
+ - freebsd
+ - netbsd
+ - windows
+ - darwin
+ goarch:
+ - 386
+ - amd64
+ - arm
+ - arm64
+ - ppc64
+ - ppc64le
+ - mips64
+ - mips64le
+ goarm:
+ - 7
+
+archives:
+ -
+ id: s2-binaries
+ name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - unpack/*
+ - s2/LICENSE
+ - s2/README.md
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ name_template: "{{ .Tag }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^doc:'
+ - '^docs:'
+ - '^test:'
+ - '^tests:'
+ - '^Update\sREADME.md'
+
+nfpms:
+ -
+ file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
+ vendor: Klaus Post
+ homepage: https://github.com/klauspost/compress
+ maintainer: Klaus Post
+ description: S2 Compression Tool
+ license: BSD 3-Clause
+ formats:
+ - deb
+ - rpm
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 000000000..87d557477
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,304 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2019 Klaus Post. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+
+Files: gzhttp/*
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2017 The New York Times Company
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------
+
+Files: s2/cmd/internal/readahead/*
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------------------
+Files: snappy/*
+Files: internal/snapref/*
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 000000000..05c7359e4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,700 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go (a minimal round trip is sketched after this list).
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
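+
+For a quick taste, here is a minimal zstd round trip using `EncodeAll`/`DecodeAll` (a sketch only; see the zstd subpackage documentation for options and streaming use):
+
+```go
+package main
+
+import (
+    "log"
+
+    "github.com/klauspost/compress/zstd"
+)
+
+func main() {
+    // Create a reusable encoder and decoder with default options.
+    enc, err := zstd.NewWriter(nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer enc.Close()
+
+    dec, err := zstd.NewReader(nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer dec.Close()
+
+    // Compress and decompress a small buffer in one call each.
+    compressed := enc.EncodeAll([]byte("hello zstandard"), nil)
+    decompressed, err := dec.DecodeAll(compressed, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("%s", decompressed)
+}
+```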
+
+[Go reference](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
+[Build status](https://github.com/klauspost/compress/actions/workflows/go.yml)
+[Sourcegraph](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# changelog
+
+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
+ * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
+ * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
+
+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
+ * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
+ * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
+ * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
+ * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
+ * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
+
+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
+ * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
+ * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
+ * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
+ * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
+ * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
+
+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
+ * fse: Fix max header size https://github.com/klauspost/compress/pull/881
+ * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
+ * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
+
+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+ * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+ * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+ * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+ * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+
+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
+ * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
+ * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
+ * flate: Add limited window compression https://github.com/klauspost/compress/pull/843
+ * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
+ * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
+ * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+
+ See changes to v1.16.x
+
+
+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+ * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+ * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
+ * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
+ * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
+ * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
+ * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
+
+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
+ * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
+ * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
+
+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
+ * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
+ * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
+ * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+ * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+ * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+ * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+ * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+ * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+ * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+ * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+ * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+ * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+ * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+ * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+ * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+ * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+ * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
+
+
+ See changes to v1.15.x
+
+* Jan 21st, 2023 (v1.15.15)
+ * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+ * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+ * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
+* Jan 3rd, 2023 (v1.15.14)
+
+ * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
+ * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
+ * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
+ * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
+
+* Dec 11, 2022 (v1.15.13)
+ * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
+ * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
+
+* Oct 26, 2022 (v1.15.12)
+
+ * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
+ * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
+* Sept 26, 2022 (v1.15.11)
+
+ * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+ * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+ * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+ * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+ * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+ * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+ * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+ * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+ * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+ * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+ * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+ * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+ * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+ * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+ * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
+* May 25, 2022 (v1.15.5)
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+ * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+ * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
+
+
+
+ See changes to v1.14.x
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+ * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+ * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+
+
+ See changes to v1.13.x
+
+* Aug 30, 2021 (v1.13.5)
+ * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
+ * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
+ * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426)
+ * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421)
+
+* Aug 12, 2021 (v1.13.4)
+ * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
+ * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
+
+* Aug 3, 2021 (v1.13.3)
+ * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
+ * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
+ * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
+ * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
+ * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
+ * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
+
+* Jun 14, 2021 (v1.13.1)
+ * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
+ * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
+ * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
+ * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
+
+* Jun 3, 2021 (v1.13.0)
+ * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
+ * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
+ * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+
+
+
+ See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+ * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+ * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+ * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+ * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+ * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+ * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+ * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+ * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+ * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+ * snappy package removed. Upstream added as dependency.
+ * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+ * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+ * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+ * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+ * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+ * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+
+
+ See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+ * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+ * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+ * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+ * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+ * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+ * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+ * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+ * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+ * s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+ * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+ * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+ * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+ * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+ * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+ * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+ * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+ * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+ * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+ * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+ * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+ * Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+ * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+ * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
+ * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
+
+* Nov 15, 2020 (v1.11.3)
+ * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
+ * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
+
+* Oct 11, 2020 (v1.11.2)
+ * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
+
+* Oct 1, 2020 (v1.11.1)
+ * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
+
+* Sept 8, 2020 (v1.11.0)
+ * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
+ * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
+ * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
+
+
+
+ See changes to v1.10.x
+
+* July 8, 2020 (v1.10.11)
+ * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
+ * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
+
+* June 23, 2020 (v1.10.10)
+ * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
+
+* June 16, 2020 (v1.10.9):
+ * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
+ * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
+ * Fuzzit tests removed. The service has been purchased and is no longer available.
+
+* June 5, 2020 (v1.10.8):
+ * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
+
+* June 1, 2020 (v1.10.7):
+ * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
+ * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
+ * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
+
+* May 21, 2020: (v1.10.6)
+ * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
+ * zstd: Stricter decompression checks.
+
+* April 12, 2020: (v1.10.5)
+ * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
+
+* Apr 8, 2020: (v1.10.4)
+ * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
+* Mar 11, 2020: (v1.10.3)
+ * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
+ * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
+ * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
+ * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
+ * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
+
+* Feb 27, 2020: (v1.10.2)
+ * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
+ * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
+
+* Feb 18, 2020: (v1.10.1)
+ * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
+ * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
+ * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
+
+* Feb 4, 2020: (v1.10.0)
+ * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
+ * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
+ * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
+ * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
+
+
+
+
+ See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+
+
+
+ See changes prior to v1.9.0
+
+* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
+* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
+* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
+* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
+* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
+* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
+* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
+* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
+* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
+* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
+* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
+* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
+* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
+* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
+* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
+* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
+* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
+* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
+* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
+* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
+* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
+* June 17, 2019: zstd decompression bugfix.
+* June 17, 2019: fix 32 bit builds.
+* June 17, 2019: Easier use in modules (less dependencies).
+* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
+* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
+* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
+* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
+* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
+* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
+* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import | new import | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
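+
+As a quick illustration, a minimal gzip round trip with the drop-in import might look like this (a sketch; the API mirrors the standard library):
+
+```go
+package main
+
+import (
+    "bytes"
+    "io"
+    "log"
+
+    // Drop-in replacement for "compress/gzip".
+    "github.com/klauspost/compress/gzip"
+)
+
+func main() {
+    var buf bytes.Buffer
+
+    // Compress using the same API as the standard library.
+    zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
+    if err != nil {
+        log.Fatal(err)
+    }
+    if _, err := zw.Write([]byte("hello, drop-in gzip")); err != nil {
+        log.Fatal(err)
+    }
+    if err := zw.Close(); err != nil {
+        log.Fatal(err)
+    }
+
+    // Decompress it again.
+    zr, err := gzip.NewReader(&buf)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer zr.Close()
+    out, err := io.ReadAll(zr)
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("decompressed: %s", out)
+}
+```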
+
+Currently there is only minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
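+
+A minimal sketch using `NewStatelessWriter` (assuming the `github.com/klauspost/compress/flate` import):
+
+```go
+package main
+
+import (
+    "bytes"
+    "log"
+
+    "github.com/klauspost/compress/flate"
+)
+
+func main() {
+    var buf bytes.Buffer
+
+    // NewStatelessWriter keeps no state between Write calls, trading
+    // some compression ratio and speed for near-zero idle memory.
+    w := flate.NewStatelessWriter(&buf)
+    if _, err := w.Write([]byte("stateless deflate example")); err != nil {
+        log.Fatal(err)
+    }
+    if err := w.Close(); err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("compressed to %d bytes", buf.Len())
+}
+```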
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+ // replace 'ioutil.Discard' with your output.
+ gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+ if err != nil {
+ return err
+ }
+ defer gzw.Close()
+
+ w := bufio.NewWriterSize(gzw, 4096)
+ defer w.Flush()
+
+ // Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib (negative means the output was bigger). *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library does.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences between levels 1-4, but levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, and CSS.
+
+Since levels 1 and 9 are close to being the same code, the results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that frequently used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear-time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a roughly 30% size increase for about a 4x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in the Go 1.7 standard library as "Huffman Only" mode, though it is not exposed for gzip there.
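+
+A minimal sketch of selecting this mode through this repository's `flate` package follows; the sample payload is made up:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	payload := bytes.Repeat([]byte("log line with a skewed character distribution\n"), 10000)
+
+	var buf bytes.Buffer
+	// HuffmanOnly disables match searching; only entropy coding is performed.
+	fw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+	if err != nil {
+		panic(err)
+	}
+	if _, err := fw.Write(payload); err != nil {
+		panic(err)
+	}
+	if err := fw.Close(); err != nil {
+		panic(err)
+	}
+	fmt.Printf("in: %d bytes, out: %d bytes\n", len(payload), buf.Len())
+}
+```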
+
+# Other packages
+
+Here are other pure Go packages of good quality (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# License
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 000000000..ca6685e2b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+ if len(b) < 16 {
+ return 0
+ }
+
+ // Correctly predicted order 1
+ hits := 0
+ lastMatch := false
+ var o1 [256]byte
+ var hist [256]int
+ c1 := byte(0)
+ for _, c := range b {
+ if c == o1[c1] {
+ // We only count a hit if there were two correct predictions in a row.
+ if lastMatch {
+ hits++
+ }
+ lastMatch = true
+ } else {
+ lastMatch = false
+ }
+ o1[c1] = c
+ c1 = c
+ hist[c]++
+ }
+
+ // Use x^0.6 to give better spread
+ prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+ // Calculate histogram distribution
+ variance := float64(0)
+ avg := float64(len(b)) / 256
+
+ for _, v := range hist {
+ Δ := float64(v) - avg
+ variance += Δ * Δ
+ }
+
+ stddev := math.Sqrt(float64(variance)) / float64(len(b))
+ exp := math.Sqrt(1 / float64(len(b)))
+
+ // Subtract expected stddev
+ stddev -= exp
+ if stddev < 0 {
+ stddev = 0
+ }
+ stddev *= 1 + exp
+
+ // Use x^0.4 to give better spread
+ entropy := math.Pow(stddev, 0.4)
+
+ // 50/50 weight between prediction and histogram distribution
+ return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+ if len(b) == 0 {
+ return 0
+ }
+ var hist [256]int
+ for _, c := range b {
+ hist[c]++
+ }
+ shannon := float64(0)
+ invTotal := 1.0 / float64(len(b))
+ for _, v := range hist[:] {
+ if v > 0 {
+ n := float64(v)
+ shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+ }
+ }
+ return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 000000000..ea7324da6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `(error)` | An internal error occurred. |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
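+
+A rough end-to-end sketch of this flow follows. The skewed sample input is made up, and real code would also record the original block size alongside the output:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// Skewed, repetitive input; real input would come from your application.
+	in := bytes.Repeat([]byte("aaabbc"), 2000)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		// comp holds the compressed block (it aliases s.Out).
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// Expected conditions: store the block uncompressed or as RLE instead.
+		return
+	default:
+		panic(err)
+	}
+
+	// comp is still in use, so clear Out before re-using the scratch.
+	s.Out = nil
+	dec, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(len(in), "->", len(comp), "roundtrip ok:", bytes.Equal(in, dec))
+}
+```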
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
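+
+For instance, a hypothetical pre-processing step (not part of this package) could shift the alphabet down before compression, with the same offset added back after decompression:
+
+```go
+// shiftDown maps input symbols into a smaller range before compression.
+// It assumes every input byte is >= offset; the offset must be stored so
+// it can be added back after decompression.
+func shiftDown(in []byte, offset byte) []byte {
+	out := make([]byte, len(in))
+	for i, v := range in {
+		out[i] = v - offset
+	}
+	return out
+}
+```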
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 000000000..f65eb3909
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.bitsRead += 8 - uint8(highBits(uint32(v)))
+ return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+ if n == 0 || b.bitsRead >= 64 {
+ return 0
+ }
+ return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+ const regMask = 64 - 1
+ v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+ // 2 bounds checks.
+ v := b.in[b.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value = (b.value << 8) | uint64(b.in[b.off-1])
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+ return b.bitsRead >= 64 && b.off == 0
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+ // Release reference.
+ b.in = nil
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
new file mode 100644
index 000000000..e82fa3bb7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -0,0 +1,167 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+ bitContainer uint64
+ nBits uint8
+ out []byte
+}
+
+// bitMask16 is bitmasks. Has extra to avoid bounds check.
+var bitMask16 = [32]uint16{
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+ if bits == 0 {
+ return
+ }
+ value <<= (16 - bits) & 15
+ value >>= (16 - bits) & 15
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+ v := b.nBits >> 3
+ switch v {
+ case 0:
+ case 1:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ )
+ case 2:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ )
+ case 3:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ )
+ case 4:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ )
+ case 5:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ )
+ case 6:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ )
+ case 7:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ byte(b.bitContainer>>48),
+ )
+ case 8:
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24),
+ byte(b.bitContainer>>32),
+ byte(b.bitContainer>>40),
+ byte(b.bitContainer>>48),
+ byte(b.bitContainer>>56),
+ )
+ default:
+ panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+ }
+ b.bitContainer >>= v << 3
+ b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+ if b.nBits < 32 {
+ return
+ }
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24))
+ b.nBits -= 32
+ b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+ nbBytes := (b.nBits + 7) >> 3
+ for i := uint8(0); i < nbBytes; i++ {
+ b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+ }
+ b.nBits = 0
+ b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() {
+ // End mark
+ b.addBits16Clean(1, 1)
+ // flush until next byte.
+ b.flushAlign()
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+ b.bitContainer = 0
+ b.nBits = 0
+ b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 000000000..abade2d60
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+ b []byte
+ off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+ b.b = in
+ b.off = 0
+}
+
+// advance the stream b n bytes.
+func (b *byteReader) advance(n uint) {
+ b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+ return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+ return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 000000000..074018d8f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,683 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If input is too hard to compress, ErrIncompressible is returned.
+// If input is a single byte value repeated ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+ if len(in) <= 1 {
+ return nil, ErrIncompressible
+ }
+ if len(in) > (2<<30)-1 {
+ return nil, errors.New("input too big, must be < 2GB")
+ }
+ s, err := s.prepare(in)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create histogram, if none was provided.
+ maxCount := s.maxCount
+ if maxCount == 0 {
+ maxCount = s.countSimple(in)
+ }
+ // Reset for next run.
+ s.clearCount = true
+ s.maxCount = 0
+ if maxCount == len(in) {
+ // One symbol, use RLE
+ return nil, ErrUseRLE
+ }
+ if maxCount == 1 || maxCount < (len(in)>>7) {
+ // Each symbol present maximum once or too well distributed.
+ return nil, ErrIncompressible
+ }
+ s.optimalTableLog()
+ err = s.normalizeCount()
+ if err != nil {
+ return nil, err
+ }
+ err = s.writeCount()
+ if err != nil {
+ return nil, err
+ }
+
+ if false {
+ err = s.validateNorm()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ err = s.buildCTable()
+ if err != nil {
+ return nil, err
+ }
+ err = s.compress(in)
+ if err != nil {
+ return nil, err
+ }
+ s.Out = s.bw.out
+ // Check if we compressed.
+ if len(s.Out) >= len(in) {
+ return nil, ErrIncompressible
+ }
+ return s.Out, nil
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+ bw *bitWriter
+ stateTable []uint16
+ state uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
+ c.bw = bw
+ c.stateTable = ct.stateTable
+
+ nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+ im := int32((nbBitsOut << 16) - first.deltaNbBits)
+ lu := (im >> nbBitsOut) + first.deltaFindState
+ c.state = c.stateTable[lu]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encode(symbolTT symbolTransform) {
+ nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+ dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+ c.bw.addBits16NC(c.state, uint8(nbBitsOut))
+ c.state = c.stateTable[dstState]
+}
+
+// encode the output symbol provided and write it to the bitstream.
+func (c *cState) encodeZero(symbolTT symbolTransform) {
+ nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
+ dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
+ c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
+ c.state = c.stateTable[dstState]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+ c.bw.flush32()
+ c.bw.addBits16NC(c.state, tableLog)
+ c.bw.flush()
+}
+
+// compress is the main compression loop that will encode the input from the last byte to the first.
+func (s *Scratch) compress(src []byte) error {
+ if len(src) <= 2 {
+ return errors.New("compress: src too small")
+ }
+ tt := s.ct.symbolTT[:256]
+ s.bw.reset(s.Out)
+
+ // Our two states each encodes every second byte.
+ // Last byte encoded (first byte decoded) will always be encoded by c1.
+ var c1, c2 cState
+
+ // Encode so remaining size is divisible by 4.
+ ip := len(src)
+ if ip&1 == 1 {
+ c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+ c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+ c1.encodeZero(tt[src[ip-3]])
+ ip -= 3
+ } else {
+ c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
+ c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
+ ip -= 2
+ }
+ if ip&2 != 0 {
+ c2.encodeZero(tt[src[ip-1]])
+ c1.encodeZero(tt[src[ip-2]])
+ ip -= 2
+ }
+ src = src[:ip]
+
+ // Main compression loop.
+ switch {
+ case !s.zeroBits && s.actualTableLog <= 8:
+ // We can encode 4 symbols without requiring a flush.
+ // We do not need to check if any output is 0 bits.
+ for ; len(src) >= 4; src = src[:len(src)-4] {
+ s.bw.flush32()
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
+ c2.encode(tt[v0])
+ c1.encode(tt[v1])
+ c2.encode(tt[v2])
+ c1.encode(tt[v3])
+ }
+ case !s.zeroBits:
+ // We do not need to check if any output is 0 bits.
+ for ; len(src) >= 4; src = src[:len(src)-4] {
+ s.bw.flush32()
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
+ c2.encode(tt[v0])
+ c1.encode(tt[v1])
+ s.bw.flush32()
+ c2.encode(tt[v2])
+ c1.encode(tt[v3])
+ }
+ case s.actualTableLog <= 8:
+ // We can encode 4 symbols without requiring a flush
+ for ; len(src) >= 4; src = src[:len(src)-4] {
+ s.bw.flush32()
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
+ c2.encodeZero(tt[v0])
+ c1.encodeZero(tt[v1])
+ c2.encodeZero(tt[v2])
+ c1.encodeZero(tt[v3])
+ }
+ default:
+ for ; len(src) >= 4; src = src[:len(src)-4] {
+ s.bw.flush32()
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
+ c2.encodeZero(tt[v0])
+ c1.encodeZero(tt[v1])
+ s.bw.flush32()
+ c2.encodeZero(tt[v2])
+ c1.encodeZero(tt[v3])
+ }
+ }
+
+ // Flush final state.
+ // Used to initialize state when decoding.
+ c2.flush(s.actualTableLog)
+ c1.flush(s.actualTableLog)
+
+ s.bw.close()
+ return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *Scratch) writeCount() error {
+ var (
+ tableLog = s.actualTableLog
+ tableSize = 1 << tableLog
+ previous0 bool
+ charnum uint16
+
+ maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ )
+ if cap(s.Out) < maxHeaderSize {
+ s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+ }
+ outP := uint(0)
+ out := s.Out[:maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+ }
+ bitStream += uint32(count) << bitCount
+ bitCount += nbBits
+ if count < max {
+ bitCount--
+ }
+
+ previous0 = count == 1
+ if remaining < 1 {
+ return errors.New("internal error: remaining<1")
+ }
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += (bitCount + 7) / 8
+
+ if charnum > s.symbolLen {
+ return errors.New("internal error: charnum > s.symbolLen")
+ }
+ s.Out = out[:outP]
+ return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+ deltaFindState int32
+ deltaNbBits uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+ return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+ tableSymbol []byte
+ stateTable []uint16
+ symbolTT []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+ tableSize := 1 << s.actualTableLog
+ // get tableSymbol that is big enough.
+ if cap(s.ct.tableSymbol) < tableSize {
+ s.ct.tableSymbol = make([]byte, tableSize)
+ }
+ s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+ ctSize := tableSize
+ if cap(s.ct.stateTable) < ctSize {
+ s.ct.stateTable = make([]uint16, ctSize)
+ }
+ s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+ if cap(s.ct.symbolTT) < 256 {
+ s.ct.symbolTT = make([]symbolTransform, 256)
+ }
+ s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ var cumul [maxSymbolValue + 2]int16
+
+ s.allocCtable()
+ tableSymbol := s.ct.tableSymbol[:tableSize]
+ // symbol start positions
+ {
+ cumul[0] = 0
+ for ui, v := range s.norm[:s.symbolLen-1] {
+ u := byte(ui) // one less than reference
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = u
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ }
+ // Encode last symbol separately to avoid overflowing u
+ u := int(s.symbolLen - 1)
+ v := s.norm[s.symbolLen-1]
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = byte(u)
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ if uint32(cumul[s.symbolLen]) != tableSize {
+ return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+ }
+ cumul[s.symbolLen] = int16(tableSize) + 1
+ }
+ // Spread symbols
+ s.zeroBits = false
+ {
+ step := tableStep(tableSize)
+ tableMask := tableSize - 1
+ var position uint32
+ // if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1))
+ for ui, v := range s.norm[:s.symbolLen] {
+ symbol := byte(ui)
+ if v > largeLimit {
+ s.zeroBits = true
+ }
+ for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+ tableSymbol[position] = symbol
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ position = (position + step) & tableMask
+ } /* Low proba area */
+ }
+ }
+
+ // Check if we have gone through all positions
+ if position != 0 {
+ return errors.New("position!=0")
+ }
+ }
+
+ // Build table
+ table := s.ct.stateTable
+ {
+ tsi := int(tableSize)
+ for u, v := range tableSymbol {
+ // TableU16 : sorted by symbol order; gives next state value
+ table[cumul[v]] = uint16(tsi + u)
+ cumul[v]++
+ }
+ }
+
+ // Build Symbol Transformation Table
+ {
+ total := int16(0)
+ symbolTT := s.ct.symbolTT[:s.symbolLen]
+ tableLog := s.actualTableLog
+ tl := (uint32(tableLog) << 16) - (1 << tableLog)
+ for i, v := range s.norm[:s.symbolLen] {
+ switch v {
+ case 0:
+ case -1, 1:
+ symbolTT[i].deltaNbBits = tl
+ symbolTT[i].deltaFindState = int32(total - 1)
+ total++
+ default:
+ maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
+ minStatePlus := uint32(v) << maxBitsOut
+ symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+ symbolTT[i].deltaFindState = int32(total - v)
+ total += v
+ }
+ }
+ if total != int16(tableSize) {
+ return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+ }
+ }
+ return nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int) {
+ for _, v := range in {
+ s.count[v]++
+ }
+ m, symlen := uint32(0), s.symbolLen
+ for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
+ if v > m {
+ m = v
+ }
+ symlen = uint16(i) + 1
+ }
+ s.symbolLen = symlen
+ return int(m)
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+ minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
+ minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
+ if minBitsSrc < minBitsSymbols {
+ return uint8(minBitsSrc)
+ }
+ return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+ tableLog := s.TableLog
+ minBits := s.minTableLog()
+ maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minTablelog {
+ tableLog = minTablelog
+ }
+ if tableLog > maxTableLog {
+ tableLog = maxTableLog
+ }
+ s.actualTableLog = tableLog
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+func (s *Scratch) normalizeCount() error {
+ var (
+ tableLog = s.actualTableLog
+ scale = 62 - uint64(tableLog)
+ step = (1 << 62) / uint64(s.br.remain())
+ vStep = uint64(1) << (scale - 20)
+ stillToDistribute = int16(1 << tableLog)
+ largest int
+ largestP int16
+ lowThreshold = (uint32)(s.br.remain() >> tableLog)
+ )
+
+ for i, cnt := range s.count[:s.symbolLen] {
+ // already handled
+ // if (count[s] == s.length) return 0; /* rle special case */
+
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ stillToDistribute--
+ } else {
+ proba := (int16)((uint64(cnt) * step) >> scale)
+ if proba < 8 {
+ restToBeat := vStep * uint64(rtbTable[proba])
+ v := uint64(cnt)*step - (uint64(proba) << scale)
+ if v > restToBeat {
+ proba++
+ }
+ }
+ if proba > largestP {
+ largestP = proba
+ largest = i
+ }
+ s.norm[i] = proba
+ stillToDistribute -= proba
+ }
+ }
+
+ if -stillToDistribute >= (s.norm[largest] >> 1) {
+ // corner case, need another normalization method
+ return s.normalizeCount2()
+ }
+ s.norm[largest] += stillToDistribute
+ return nil
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *Scratch) normalizeCount2() error {
+ const notYetAssigned = -2
+ var (
+ distributed uint32
+ total = uint32(s.br.remain())
+ tableLog = s.actualTableLog
+ lowThreshold = total >> tableLog
+ lowOne = (total * 3) >> (tableLog + 1)
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ distributed++
+ total -= cnt
+ continue
+ }
+ if cnt <= lowOne {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ s.norm[i] = notYetAssigned
+ }
+ toDistribute := (1 << tableLog) - distributed
+
+ if (total / toDistribute) > lowOne {
+ // risk of rounding to zero
+ lowOne = (total * 3) / (toDistribute * 2)
+ for i, cnt := range s.count[:s.symbolLen] {
+ if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ }
+ toDistribute = (1 << tableLog) - distributed
+ }
+ if distributed == uint32(s.symbolLen)+1 {
+ // all values are pretty poor;
+ // probably incompressible data (should have already been detected);
+ // find max, then give all remaining points to max
+ var maxV int
+ var maxC uint32
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt > maxC {
+ maxV = i
+ maxC = cnt
+ }
+ }
+ s.norm[maxV] += int16(toDistribute)
+ return nil
+ }
+
+ if total == 0 {
+ // all of the symbols were low enough for the lowOne or lowThreshold
+ for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+ if s.norm[i] > 0 {
+ toDistribute--
+ s.norm[i]++
+ }
+ }
+ return nil
+ }
+
+ var (
+ vStepLog = 62 - uint64(tableLog)
+ mid = uint64((1 << (vStepLog - 1)) - 1)
+ rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+ tmpTotal = mid
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if s.norm[i] == notYetAssigned {
+ var (
+ end = tmpTotal + uint64(cnt)*rStep
+ sStart = uint32(tmpTotal >> vStepLog)
+ sEnd = uint32(end >> vStepLog)
+ weight = sEnd - sStart
+ )
+ if weight < 1 {
+ return errors.New("weight < 1")
+ }
+ s.norm[i] = int16(weight)
+ tmpTotal = end
+ }
+ }
+ return nil
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *Scratch) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+ }
+ return nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+ var (
+ charnum uint16
+ previous0 bool
+ b = &s.br
+ )
+ iend := b.remain()
+ if iend < 4 {
+ return errors.New("input too small")
+ }
+ bitStream := b.Uint32()
+ nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+ if nbBits > tablelogAbsoluteMax {
+ return errors.New("tableLog too large")
+ }
+ bitStream >>= 4
+ bitCount := uint(4)
+
+ s.actualTableLog = uint8(nbBits)
+ remaining := int32((1 << nbBits) + 1)
+ threshold := int32(1 << nbBits)
+ gotTotal := int32(0)
+ nbBits++
+
+ for remaining > 1 {
+ if previous0 {
+ n0 := charnum
+ for (bitStream & 0xFFFF) == 0xFFFF {
+ n0 += 24
+ if b.off < iend-5 {
+ b.advance(2)
+ bitStream = b.Uint32() >> bitCount
+ } else {
+ bitStream >>= 16
+ bitCount += 16
+ }
+ }
+ for (bitStream & 3) == 3 {
+ n0 += 3
+ bitStream >>= 2
+ bitCount += 2
+ }
+ n0 += uint16(bitStream & 3)
+ bitCount += 2
+ if n0 > maxSymbolValue {
+ return errors.New("maxSymbolValue too small")
+ }
+ for charnum < n0 {
+ s.norm[charnum&0xff] = 0
+ charnum++
+ }
+
+ if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ bitStream = b.Uint32() >> bitCount
+ } else {
+ bitStream >>= 2
+ }
+ }
+
+ max := (2*(threshold) - 1) - (remaining)
+ var count int32
+
+ if (int32(bitStream) & (threshold - 1)) < max {
+ count = int32(bitStream) & (threshold - 1)
+ bitCount += nbBits - 1
+ } else {
+ count = int32(bitStream) & (2*threshold - 1)
+ if count >= threshold {
+ count -= max
+ }
+ bitCount += nbBits
+ }
+
+ count-- // extra accuracy
+ if count < 0 {
+ // -1 means +1
+ remaining += count
+ gotTotal -= count
+ } else {
+ remaining -= count
+ gotTotal += count
+ }
+ s.norm[charnum&0xff] = int16(count)
+ charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+ if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ }
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ s.symbolLen = charnum
+
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return nil
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+ newState uint16
+ symbol uint8
+ nbBits uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() {
+ tableSize := 1 << s.actualTableLog
+ if cap(s.decTable) < tableSize {
+ s.decTable = make([]decSymbol, tableSize)
+ }
+ s.decTable = s.decTable[:tableSize]
+
+ if cap(s.ct.tableSymbol) < 256 {
+ s.ct.tableSymbol = make([]byte, 256)
+ }
+ s.ct.tableSymbol = s.ct.tableSymbol[:256]
+
+ if cap(s.ct.stateTable) < 256 {
+ s.ct.stateTable = make([]uint16, 256)
+ }
+ s.ct.stateTable = s.ct.stateTable[:256]
+}
+
+// buildDtable will build the decoding table.
+func (s *Scratch) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ s.allocDtable()
+ symbolNext := s.ct.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ s.zeroBits = false
+ {
+ largeLimit := int16(1 << (s.actualTableLog - 1))
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.decTable[highThreshold].symbol = uint8(i)
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ if v >= largeLimit {
+ s.zeroBits = true
+ }
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.decTable[position].symbol = uint8(ss)
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.decTable {
+ symbol := v.symbol
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.decTable[u].nbBits = nBits
+ newState := (nextState << nBits) - tableSize
+ if newState >= tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.decTable[u].newState = newState
+ }
+ }
+ return nil
+}
+
+// decompress will decompress the bitstream.
+// If the buffer is over-read an error is returned.
+func (s *Scratch) decompress() error {
+ br := &s.bits
+ if err := br.init(s.br.unread()); err != nil {
+ return err
+ }
+
+ var s1, s2 decoder
+ // Initialize and decode first state and symbol.
+ s1.init(br, s.decTable, s.actualTableLog)
+ s2.init(br, s.decTable, s.actualTableLog)
+
+ // Use temp table to avoid bound checks/append penalty.
+ var tmp = s.ct.tableSymbol[:256]
+ var off uint8
+
+ // Main part
+ if !s.zeroBits {
+ for br.off >= 8 {
+ br.fillFast()
+ tmp[off+0] = s1.nextFast()
+ tmp[off+1] = s2.nextFast()
+ br.fillFast()
+ tmp[off+2] = s1.nextFast()
+ tmp[off+3] = s2.nextFast()
+ off += 4
+ // When off is 0, we have overflowed and should write.
+ if off == 0 {
+ s.Out = append(s.Out, tmp...)
+ if len(s.Out) >= s.DecompressLimit {
+ return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+ }
+ }
+ }
+ } else {
+ for br.off >= 8 {
+ br.fillFast()
+ tmp[off+0] = s1.next()
+ tmp[off+1] = s2.next()
+ br.fillFast()
+ tmp[off+2] = s1.next()
+ tmp[off+3] = s2.next()
+ off += 4
+ if off == 0 {
+ s.Out = append(s.Out, tmp...)
+ // When off is 0, we have overflowed and should write.
+ if len(s.Out) >= s.DecompressLimit {
+ return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+ }
+ }
+ }
+ }
+ s.Out = append(s.Out, tmp[:off]...)
+
+ // Final bits, a bit more expensive check
+ for {
+ if s1.finished() {
+ s.Out = append(s.Out, s1.final(), s2.final())
+ break
+ }
+ br.fill()
+ s.Out = append(s.Out, s1.next())
+ if s2.finished() {
+ s.Out = append(s.Out, s2.final(), s1.final())
+ break
+ }
+ s.Out = append(s.Out, s2.next())
+ if len(s.Out) >= s.DecompressLimit {
+ return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
+ }
+ }
+ return br.close()
+}
+
+// decoder keeps track of the current state and updates it from the bitstream.
+type decoder struct {
+ state uint16
+ br *bitReader
+ dt []decSymbol
+}
+
+// init will initialize the decoder and read the first state from the stream.
+func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
+ d.dt = dt
+ d.br = in
+ d.state = in.getBits(tableLog)
+}
+
+// next returns the next symbol and sets the next state.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) next() uint8 {
+ n := &d.dt[d.state]
+ lowBits := d.br.getBits(n.nbBits)
+ d.state = n.newState + lowBits
+ return n.symbol
+}
+
+// finished returns true if all bits have been read from the bitstream
+// and the next state would require reading bits from the input.
+func (d *decoder) finished() bool {
+ return d.br.finished() && d.dt[d.state].nbBits > 0
+}
+
+// final returns the current state symbol without decoding the next.
+func (d *decoder) final() uint8 {
+ return d.dt[d.state].symbol
+}
+
+// nextFast returns the next symbol and sets the next state.
+// This can only be used if no symbols are 0 bits.
+// At least tablelog bits must be available in the bit reader.
+func (d *decoder) nextFast() uint8 {
+ n := d.dt[d.state]
+ lowBits := d.br.getBitsFast(n.nbBits)
+ d.state = n.newState + lowBits
+ return n.symbol
+}
diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go
new file mode 100644
index 000000000..535cbadfd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/fse.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+// Package fse provides Finite State Entropy encoding and decoding.
+//
+// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
+// for byte blocks as implemented in zstd.
+//
+// See https://github.com/klauspost/compress/tree/master/fse for more information.
+package fse
+
+import (
+ "errors"
+ "fmt"
+ "math/bits"
+)
+
+const (
+ /*!MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+ maxMemoryUsage = 14
+ defaultMemoryUsage = 13
+
+ maxTableLog = maxMemoryUsage - 2
+ maxTablesize = 1 << maxTableLog
+ defaultTablelog = defaultMemoryUsage - 2
+ minTablelog = 5
+ maxSymbolValue = 255
+)
+
+var (
+ // ErrIncompressible is returned when input is judged to be too hard to compress.
+ ErrIncompressible = errors.New("input is not compressible")
+
+ // ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+ ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+ // Private
+ count [maxSymbolValue + 1]uint32
+ norm [maxSymbolValue + 1]int16
+ br byteReader
+ bits bitReader
+ bw bitWriter
+ ct cTable // Compression tables.
+ decTable []decSymbol // Decompression table.
+ maxCount int // count of the most probable symbol
+
+ // Per block parameters.
+ // These can be used to override compression parameters of the block.
+ // Do not touch, unless you know what you are doing.
+
+ // Out is output buffer.
+ // If the scratch is re-used before the caller is done processing the output,
+ // set this field to nil.
+ // Otherwise the output buffer will be re-used for next Compression/Decompression step
+ // and allocation will be avoided.
+ Out []byte
+
+ // DecompressLimit limits the maximum decoded size acceptable.
+ // If > 0 decompression will stop when approximately this many bytes
+ // have been decoded.
+ // If 0, maximum size will be 2GB.
+ DecompressLimit int
+
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ zeroBits bool // true if zero-bit symbols are possible (some symbol has prob > 50%).
+ clearCount bool // clear count
+
+ // MaxSymbolValue will override the maximum symbol value of the next block.
+ MaxSymbolValue uint8
+
+ // TableLog will attempt to override the tablelog for the next block.
+ TableLog uint8
+}
+
+// Histogram returns the histogram slice, which can be populated before compression to skip that step.
+// It can also be used to inspect the histogram after compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+ return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+ s.maxCount = maxCount
+ s.symbolLen = uint16(maxSymbol) + 1
+ s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+ if s == nil {
+ s = &Scratch{}
+ }
+ if s.MaxSymbolValue == 0 {
+ s.MaxSymbolValue = 255
+ }
+ if s.TableLog == 0 {
+ s.TableLog = defaultTablelog
+ }
+ if s.TableLog > maxTableLog {
+ return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+ }
+ if cap(s.Out) == 0 {
+ s.Out = make([]byte, 0, len(in))
+ }
+ if s.clearCount && s.maxCount == 0 {
+ for i := range s.count {
+ s.count[i] = 0
+ }
+ s.clearCount = false
+ }
+ s.br.init(in)
+ if s.DecompressLimit == 0 {
+ // Max size 2GB.
+ s.DecompressLimit = (2 << 30) - 1
+ }
+
+ return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+ return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface for compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 KiB) |
+| `(error)` | An internal error occurred. |
+
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do note, however, that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the decompressor with the output from the compression stage, at exactly the size that was returned.
+If you receive an error back, your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
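+
+A sketch of the decode path for a 1X block that carries its own table (the helper name and the caller-chosen
+`maxSize` limit are illustrative):
+
+```go
+package example
+
+import "github.com/klauspost/compress/huff0"
+
+// decodeBlock reads the table from the block and decodes the remaining data.
+func decodeBlock(compressed []byte, maxSize int) ([]byte, error) {
+	s, data, err := huff0.ReadTable(compressed, &huff0.Scratch{MaxDecodedSize: maxSize})
+	if err != nil {
+		return nil, err
+	}
+	// The stateless Decoder can also be shared between goroutines; the capacity
+	// of the destination slice indicates the expected output size.
+	return s.Decoder().Decompress1X(make([]byte, 0, maxSize), data)
+}
+```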
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 000000000..e36d9742f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,229 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderBytes struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderBytes) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.advance(8 - uint8(highBit32(uint32(v))))
+ return nil
+}
+
+// peekByteFast requires that at least one byte is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderBytes) peekByteFast() uint8 {
+ got := uint8(b.value >> 56)
+ return got
+}
+
+func (b *bitReaderBytes) advance(n uint8) {
+ b.bitsRead += n
+ b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderBytes) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+
+ // 2 bounds checks.
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
+func (b *bitReaderBytes) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderBytes) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << (b.bitsRead - 32)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReaderBytes) finished() bool {
+ return b.off == 0 && b.bitsRead >= 64
+}
+
+func (b *bitReaderBytes) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderBytes) close() error {
+ // Release reference.
+ b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+// bitReaderShifted reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReaderShifted struct {
+ in []byte
+ off uint // next byte to read is at in[off - 1]
+ value uint64
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReaderShifted) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ b.off = uint(len(in))
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.advance(8 - uint8(highBit32(uint32(v))))
+ return nil
+}
+
+// peekBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
+ return uint16(b.value >> ((64 - n) & 63))
+}
+
+func (b *bitReaderShifted) advance(n uint8) {
+ b.bitsRead += n
+ b.value <<= n & 63
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReaderShifted) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+
+ // 2 bounds checks.
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+ b.bitsRead -= 32
+ b.off -= 4
+}
+
+// fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
+func (b *bitReaderShifted) fillFastStart() {
+ // Do single re-slice to avoid bounds checks.
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.bitsRead = 0
+ b.off -= 8
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReaderShifted) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if b.off > 4 {
+ v := b.in[b.off-4 : b.off]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
+ b.bitsRead -= 32
+ b.off -= 4
+ return
+ }
+ for b.off > 0 {
+ b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63)
+ b.bitsRead -= 8
+ b.off--
+ }
+}
+
+func (b *bitReaderShifted) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
+// close the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReaderShifted) close() error {
+ // Release reference.
+ b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
new file mode 100644
index 000000000..0ebc9aaac
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -0,0 +1,102 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+ bitContainer uint64
+ nBits uint8
+ out []byte
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// encSymbol will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
+ enc := ct[symbol]
+ b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
+ if false {
+ if enc.nBits == 0 {
+ panic("nbits 0")
+ }
+ }
+ b.nBits += enc.nBits
+}
+
+// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+ encA := ct[av]
+ encB := ct[bv]
+ sh := b.nBits & 63
+ combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+ b.bitContainer |= combined << sh
+ if false {
+ if encA.nBits == 0 {
+ panic("nbitsA 0")
+ }
+ if encB.nBits == 0 {
+ panic("nbitsB 0")
+ }
+ }
+ b.nBits += encA.nBits + encB.nBits
+}
+
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+ bitsA := encA.nBits
+ bitsB := bitsA + encB.nBits
+ bitsC := bitsB + encC.nBits
+ bitsD := bitsC + encD.nBits
+ combined := uint64(encA.val) |
+ (uint64(encB.val) << (bitsA & 63)) |
+ (uint64(encC.val) << (bitsB & 63)) |
+ (uint64(encD.val) << (bitsC & 63))
+ b.bitContainer |= combined << (b.nBits & 63)
+ b.nBits += bitsD
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+ if b.nBits < 32 {
+ return
+ }
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24))
+ b.nBits -= 32
+ b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+ nbBytes := (b.nBits + 7) >> 3
+ for i := uint8(0); i < nbBytes; i++ {
+ b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+ }
+ b.nBits = 0
+ b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() {
+ // End mark
+ b.addBits16Clean(1, 1)
+ // flush until next byte.
+ b.flushAlign()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 000000000..84aa3d12f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,742 @@
+package huff0
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return nil, false, err
+ }
+ return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similar to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return nil, false, err
+ }
+ if false {
+ // TODO: compress4Xp only slightly faster.
+ const parallelThreshold = 8 << 10
+ if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+ return compress(in, s, s.compress4X)
+ }
+ return compress(in, s, s.compress4Xp)
+ }
+ return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+ // Nuke previous table if we cannot reuse anyway.
+ if s.Reuse == ReusePolicyNone {
+ s.prevTable = s.prevTable[:0]
+ }
+
+ // Create histogram, if none was provided.
+ maxCount := s.maxCount
+ var canReuse = false
+ if maxCount == 0 {
+ maxCount, canReuse = s.countSimple(in)
+ } else {
+ canReuse = s.canUseTable(s.prevTable)
+ }
+
+ // We want the output size to be less than this:
+ wantSize := len(in)
+ if s.WantLogLess > 0 {
+ wantSize -= wantSize >> s.WantLogLess
+ }
+
+ // Reset for next run.
+ s.clearCount = true
+ s.maxCount = 0
+ if maxCount >= len(in) {
+ if maxCount > len(in) {
+ return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+ }
+ if len(in) == 1 {
+ return nil, false, ErrIncompressible
+ }
+ // One symbol, use RLE
+ return nil, false, ErrUseRLE
+ }
+ if maxCount == 1 || maxCount < (len(in)>>7) {
+ // Each symbol present maximum once or too well distributed.
+ return nil, false, ErrIncompressible
+ }
+ if s.Reuse == ReusePolicyMust && !canReuse {
+ // We must reuse, but we can't.
+ return nil, false, ErrIncompressible
+ }
+ if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
+ keepTable := s.cTable
+ keepTL := s.actualTableLog
+ s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
+ s.Out, err = compressor(in)
+ s.cTable = keepTable
+ s.actualTableLog = keepTL
+ if err == nil && len(s.Out) < wantSize {
+ s.OutData = s.Out
+ return s.Out, true, nil
+ }
+ if s.Reuse == ReusePolicyMust {
+ return nil, false, ErrIncompressible
+ }
+ // Do not attempt to re-use later.
+ s.prevTable = s.prevTable[:0]
+ }
+
+ // Calculate new table.
+ err = s.buildCTable()
+ if err != nil {
+ return nil, false, err
+ }
+
+ if false && !s.canUseTable(s.cTable) {
+ panic("invalid table generated")
+ }
+
+ if s.Reuse == ReusePolicyAllow && canReuse {
+ hSize := len(s.Out)
+ oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
+ newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
+ if oldSize <= hSize+newSize || hSize+12 >= wantSize {
+ // Retain cTable even if we re-use.
+ keepTable := s.cTable
+ keepTL := s.actualTableLog
+
+ s.cTable = s.prevTable
+ s.actualTableLog = s.prevTableLog
+ s.Out, err = compressor(in)
+
+ // Restore ctable.
+ s.cTable = keepTable
+ s.actualTableLog = keepTL
+ if err != nil {
+ return nil, false, err
+ }
+ if len(s.Out) >= wantSize {
+ return nil, false, ErrIncompressible
+ }
+ s.OutData = s.Out
+ return s.Out, true, nil
+ }
+ }
+
+ // Use new table
+ err = s.cTable.write(s)
+ if err != nil {
+ s.OutTable = nil
+ return nil, false, err
+ }
+ s.OutTable = s.Out
+
+ // Compress using new table
+ s.Out, err = compressor(in)
+ if err != nil {
+ s.OutTable = nil
+ return nil, false, err
+ }
+ if len(s.Out) >= wantSize {
+ s.OutTable = nil
+ return nil, false, ErrIncompressible
+ }
+ // Move current table into previous.
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
+ s.OutData = s.Out[len(s.OutTable):]
+ return s.Out, false, nil
+}
+
+// EstimateSizes will estimate the data sizes
+func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
+ s, err = s.prepare(in)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ // Create histogram, if none was provided.
+ tableSz, dataSz, reuseSz = -1, -1, -1
+ maxCount := s.maxCount
+ var canReuse = false
+ if maxCount == 0 {
+ maxCount, canReuse = s.countSimple(in)
+ } else {
+ canReuse = s.canUseTable(s.prevTable)
+ }
+
+ // We want the output size to be less than this:
+ wantSize := len(in)
+ if s.WantLogLess > 0 {
+ wantSize -= wantSize >> s.WantLogLess
+ }
+
+ // Reset for next run.
+ s.clearCount = true
+ s.maxCount = 0
+ if maxCount >= len(in) {
+ if maxCount > len(in) {
+ return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
+ }
+ if len(in) == 1 {
+ return 0, 0, 0, ErrIncompressible
+ }
+ // One symbol, use RLE
+ return 0, 0, 0, ErrUseRLE
+ }
+ if maxCount == 1 || maxCount < (len(in)>>7) {
+ // Each symbol present maximum once or too well distributed.
+ return 0, 0, 0, ErrIncompressible
+ }
+
+ // Calculate new table.
+ err = s.buildCTable()
+ if err != nil {
+ return 0, 0, 0, err
+ }
+
+ if false && !s.canUseTable(s.cTable) {
+ panic("invalid table generated")
+ }
+
+ tableSz, err = s.cTable.estTableSize(s)
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ if canReuse {
+ reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
+ }
+ dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
+
+ // Restore
+ return tableSz, dataSz, reuseSz, nil
+}
+
+func (s *Scratch) compress1X(src []byte) ([]byte, error) {
+ return s.compress1xDo(s.Out, src), nil
+}
+
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
+ var bw = bitWriter{out: dst}
+
+ // N is length divisible by 4.
+ n := len(src)
+ n -= n & 3
+ cTable := s.cTable[:256]
+
+ // Encode last bytes.
+ for i := len(src) & 3; i > 0; i-- {
+ bw.encSymbol(cTable, src[n+i-1])
+ }
+ n -= 4
+ if s.actualTableLog <= 8 {
+ for ; n >= 0; n -= 4 {
+ tmp := src[n : n+4]
+ // tmp should be len 4
+ bw.flush32()
+ bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
+ }
+ } else {
+ for ; n >= 0; n -= 4 {
+ tmp := src[n : n+4]
+ // tmp should be len 4
+ bw.flush32()
+ bw.encTwoSymbols(cTable, tmp[3], tmp[2])
+ bw.flush32()
+ bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+ }
+ }
+ bw.close()
+ return bw.out
+}
+
+var sixZeros [6]byte
+
+func (s *Scratch) compress4X(src []byte) ([]byte, error) {
+ if len(src) < 12 {
+ return nil, ErrIncompressible
+ }
+ segmentSize := (len(src) + 3) / 4
+
+ // Add placeholder for output length
+ offsetIdx := len(s.Out)
+ s.Out = append(s.Out, sixZeros[:]...)
+
+ for i := 0; i < 4; i++ {
+ toDo := src
+ if len(toDo) > segmentSize {
+ toDo = toDo[:segmentSize]
+ }
+ src = src[len(toDo):]
+
+ idx := len(s.Out)
+ s.Out = s.compress1xDo(s.Out, toDo)
+ if len(s.Out)-idx > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
+ // Write compressed length as little endian before block.
+ if i < 3 {
+ // Last length is not written.
+ length := len(s.Out) - idx
+ s.Out[i*2+offsetIdx] = byte(length)
+ s.Out[i*2+offsetIdx+1] = byte(length >> 8)
+ }
+ }
+
+ return s.Out, nil
+}
+
+// compress4Xp will compress 4 streams using separate goroutines.
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
+ if len(src) < 12 {
+ return nil, ErrIncompressible
+ }
+ // Add placeholder for output length
+ s.Out = s.Out[:6]
+
+ segmentSize := (len(src) + 3) / 4
+ var wg sync.WaitGroup
+ wg.Add(4)
+ for i := 0; i < 4; i++ {
+ toDo := src
+ if len(toDo) > segmentSize {
+ toDo = toDo[:segmentSize]
+ }
+ src = src[len(toDo):]
+
+ // Separate goroutine for each block.
+ go func(i int) {
+ s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+ for i := 0; i < 4; i++ {
+ o := s.tmpOut[i]
+ if len(o) > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
+ // Write compressed length as little endian before block.
+ if i < 3 {
+ // Last length is not written.
+ s.Out[i*2] = byte(len(o))
+ s.Out[i*2+1] = byte(len(o) >> 8)
+ }
+
+ // Write output.
+ s.Out = append(s.Out, o...)
+ }
+ return s.Out, nil
+}
+
+// countSimple will create a simple histogram in s.count.
+// Returns the biggest count.
+// Does not update s.clearCount.
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
+ reuse = true
+ _ = s.count // Assert that s != nil to speed up the following loop.
+ for _, v := range in {
+ s.count[v]++
+ }
+ m := uint32(0)
+ if len(s.prevTable) > 0 {
+ for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
+ if v > m {
+ m = v
+ }
+ s.symbolLen = uint16(i) + 1
+ if i >= len(s.prevTable) {
+ reuse = false
+ } else if s.prevTable[i].nBits == 0 {
+ reuse = false
+ }
+ }
+ return int(m), reuse
+ }
+ for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
+ if v > m {
+ m = v
+ }
+ s.symbolLen = uint16(i) + 1
+ }
+ return int(m), false
+}
+
+func (s *Scratch) canUseTable(c cTable) bool {
+ if len(c) < int(s.symbolLen) {
+ return false
+ }
+ for i, v := range s.count[:s.symbolLen] {
+ if v != 0 && c[i].nBits == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+//lint:ignore U1000 used for debugging
+func (s *Scratch) validateTable(c cTable) bool {
+ if len(c) < int(s.symbolLen) {
+ return false
+ }
+ for i, v := range s.count[:s.symbolLen] {
+ if v != 0 {
+ if c[i].nBits == 0 {
+ return false
+ }
+ if c[i].nBits > s.actualTableLog {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// minTableLog provides the minimum logSize to safely represent a distribution.
+func (s *Scratch) minTableLog() uint8 {
+ minBitsSrc := highBit32(uint32(s.srcLen)) + 1
+ minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
+ if minBitsSrc < minBitsSymbols {
+ return uint8(minBitsSrc)
+ }
+ return uint8(minBitsSymbols)
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *Scratch) optimalTableLog() {
+ tableLog := s.TableLog
+ minBits := s.minTableLog()
+ maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minTablelog {
+ tableLog = minTablelog
+ }
+ if tableLog > tableLogMax {
+ tableLog = tableLogMax
+ }
+ s.actualTableLog = tableLog
+}
+
+type cTableEntry struct {
+ val uint16
+ nBits uint8
+ // We have 8 bits extra
+}
+
+const huffNodesMask = huffNodesLen - 1
+
+func (s *Scratch) buildCTable() error {
+ s.optimalTableLog()
+ s.huffSort()
+ if cap(s.cTable) < maxSymbolValue+1 {
+ s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
+ } else {
+ s.cTable = s.cTable[:s.symbolLen]
+ for i := range s.cTable {
+ s.cTable[i] = cTableEntry{}
+ }
+ }
+
+ var startNode = int16(s.symbolLen)
+ nonNullRank := s.symbolLen - 1
+
+ nodeNb := startNode
+ huffNode := s.nodes[1 : huffNodesLen+1]
+
+ // This overlays the slice above, but allows "-1" index lookups.
+ // Different from reference implementation.
+ huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+ for huffNode[nonNullRank].count() == 0 {
+ nonNullRank--
+ }
+
+ lowS := int16(nonNullRank)
+ nodeRoot := nodeNb + lowS - 1
+ lowN := nodeNb
+ huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+ huffNode[lowS].setParent(nodeNb)
+ huffNode[lowS-1].setParent(nodeNb)
+ nodeNb++
+ lowS -= 2
+ for n := nodeNb; n <= nodeRoot; n++ {
+ huffNode[n].setCount(1 << 30)
+ }
+ // fake entry, strong barrier
+ huffNode0[0].setCount(1 << 31)
+
+ // create parents
+ for nodeNb <= nodeRoot {
+ var n1, n2 int16
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+ n1 = lowS
+ lowS--
+ } else {
+ n1 = lowN
+ lowN++
+ }
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+ n2 = lowS
+ lowS--
+ } else {
+ n2 = lowN
+ lowN++
+ }
+
+ huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+ huffNode0[n1+1].setParent(nodeNb)
+ huffNode0[n2+1].setParent(nodeNb)
+ nodeNb++
+ }
+
+ // distribute weights (unlimited tree height)
+ huffNode[nodeRoot].setNbBits(0)
+ for n := nodeRoot - 1; n >= startNode; n-- {
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+ }
+ for n := uint16(0); n <= nonNullRank; n++ {
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+ }
+ s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+ maxNbBits := s.actualTableLog
+
+ // fill result into tree (val, nbBits)
+ if maxNbBits > tableLogMax {
+ return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+ }
+ var nbPerRank [tableLogMax + 1]uint16
+ var valPerRank [16]uint16
+ for _, v := range huffNode[:nonNullRank+1] {
+ nbPerRank[v.nbBits()]++
+ }
+ // determine starting value per rank
+ {
+ min := uint16(0)
+ for n := maxNbBits; n > 0; n-- {
+ // get starting value within each rank
+ valPerRank[n] = min
+ min += nbPerRank[n]
+ min >>= 1
+ }
+ }
+
+ // push nbBits per symbol, symbol order
+ for _, v := range huffNode[:nonNullRank+1] {
+ s.cTable[v.symbol()].nBits = v.nbBits()
+ }
+
+ // assign value within rank, symbol order
+ t := s.cTable[:s.symbolLen]
+ for n, val := range t {
+ nbits := val.nBits & 15
+ v := valPerRank[nbits]
+ t[n].val = v
+ valPerRank[nbits] = v + 1
+ }
+
+ return nil
+}
+
+// huffSort will sort symbols, decreasing order.
+func (s *Scratch) huffSort() {
+ type rankPos struct {
+ base uint32
+ current uint32
+ }
+
+ // Clear nodes
+ nodes := s.nodes[:huffNodesLen+1]
+ s.nodes = nodes
+ nodes = nodes[1 : huffNodesLen+1]
+
+ // Sort into buckets based on length of symbol count.
+ var rank [32]rankPos
+ for _, v := range s.count[:s.symbolLen] {
+ r := highBit32(v+1) & 31
+ rank[r].base++
+ }
+ // maxBitLength is log2(BlockSizeMax) + 1
+ const maxBitLength = 18 + 1
+ for n := maxBitLength; n > 0; n-- {
+ rank[n-1].base += rank[n].base
+ }
+ for n := range rank[:maxBitLength] {
+ rank[n].current = rank[n].base
+ }
+ for n, c := range s.count[:s.symbolLen] {
+ r := (highBit32(c+1) + 1) & 31
+ pos := rank[r].current
+ rank[r].current++
+ prev := nodes[(pos-1)&huffNodesMask]
+ for pos > rank[r].base && c > prev.count() {
+ nodes[pos&huffNodesMask] = prev
+ pos--
+ prev = nodes[(pos-1)&huffNodesMask]
+ }
+ nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
+ }
+}
+
+func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
+ maxNbBits := s.actualTableLog
+ huffNode := s.nodes[1 : huffNodesLen+1]
+ //huffNode = huffNode[: huffNodesLen]
+
+ largestBits := huffNode[lastNonNull].nbBits()
+
+ // early exit : no elt > maxNbBits
+ if largestBits <= maxNbBits {
+ return largestBits
+ }
+ totalCost := int(0)
+ baseCost := int(1) << (largestBits - maxNbBits)
+ n := uint32(lastNonNull)
+
+ for huffNode[n].nbBits() > maxNbBits {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+ huffNode[n].setNbBits(maxNbBits)
+ n--
+ }
+ // n stops at huffNode[n].nbBits <= maxNbBits
+
+ for huffNode[n].nbBits() == maxNbBits {
+ n--
+ }
+ // n end at index of smallest symbol using < maxNbBits
+
+ // renorm totalCost
+ totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */
+
+ // repay normalized cost
+ {
+ const noSymbol = 0xF0F0F0F0
+ var rankLast [tableLogMax + 2]uint32
+
+ for i := range rankLast[:] {
+ rankLast[i] = noSymbol
+ }
+
+ // Get pos of last (smallest) symbol per rank
+ {
+ currentNbBits := maxNbBits
+ for pos := int(n); pos >= 0; pos-- {
+ if huffNode[pos].nbBits() >= currentNbBits {
+ continue
+ }
+ currentNbBits = huffNode[pos].nbBits() // < maxNbBits
+ rankLast[maxNbBits-currentNbBits] = uint32(pos)
+ }
+ }
+
+ for totalCost > 0 {
+ nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1
+
+ for ; nBitsToDecrease > 1; nBitsToDecrease-- {
+ highPos := rankLast[nBitsToDecrease]
+ lowPos := rankLast[nBitsToDecrease-1]
+ if highPos == noSymbol {
+ continue
+ }
+ if lowPos == noSymbol {
+ break
+ }
+ highTotal := huffNode[highPos].count()
+ lowTotal := 2 * huffNode[lowPos].count()
+ if highTotal <= lowTotal {
+ break
+ }
+ }
+ // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
+ // FIXME: try to remove
+ for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
+ nBitsToDecrease++
+ }
+ totalCost -= 1 << (nBitsToDecrease - 1)
+ if rankLast[nBitsToDecrease-1] == noSymbol {
+ // this rank is no longer empty
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
+ }
+ huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+ huffNode[rankLast[nBitsToDecrease]].nbBits())
+ if rankLast[nBitsToDecrease] == 0 {
+ /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol
+ } else {
+ rankLast[nBitsToDecrease]--
+ if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
+ rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
+ }
+ }
+ }
+
+ for totalCost < 0 { /* Sometimes, cost correction overshoot */
+ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+ for huffNode[n].nbBits() == maxNbBits {
+ n--
+ }
+ huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
+ rankLast[1] = n + 1
+ totalCost++
+ continue
+ }
+ huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
+ rankLast[1]++
+ totalCost++
+ }
+ }
+ return maxNbBits
+}
+
+// A nodeElt is the fields
+//
+// count uint32
+// parent uint16
+// symbol byte
+// nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+ return nodeElt(count) | nodeElt(symbol)<<48
+}
+
+func (e *nodeElt) count() uint32 { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
new file mode 100644
index 000000000..54bd08b25
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -0,0 +1,1167 @@
+package huff0
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/fse"
+)
+
+type dTable struct {
+ single []dEntrySingle
+}
+
+// single-symbols decoding
+type dEntrySingle struct {
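+ // entry packs the decode step: the low 8 bits hold the number of input bits consumed, the high 8 bits hold the decoded symbol.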
+ entry uint16
+}
+
+// Uses special code for all tables that are < 8 bits.
+const use8BitTables = true
+
+// ReadTable will read a table from the input.
+// The size of the input may be larger than the table definition.
+// Any content remaining after the table definition will be returned.
+// If no Scratch is provided a new one is allocated.
+// The returned Scratch can be used for encoding or decoding input using this table.
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
+ s, err = s.prepare(nil)
+ if err != nil {
+ return s, nil, err
+ }
+ if len(in) <= 1 {
+ return s, nil, errors.New("input too small for table")
+ }
+ iSize := in[0]
+ in = in[1:]
+ if iSize >= 128 {
+ // Uncompressed
+ oSize := iSize - 127
+ iSize = (oSize + 1) / 2
+ if int(iSize) > len(in) {
+ return s, nil, errors.New("input too small for table")
+ }
+ for n := uint8(0); n < oSize; n += 2 {
+ v := in[n/2]
+ s.huffWeight[n] = v >> 4
+ s.huffWeight[n+1] = v & 15
+ }
+ s.symbolLen = uint16(oSize)
+ in = in[iSize:]
+ } else {
+ if len(in) < int(iSize) {
+ return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
+ }
+ // FSE compressed weights
+ s.fse.DecompressLimit = 255
+ hw := s.huffWeight[:]
+ s.fse.Out = hw
+ b, err := fse.Decompress(in[:iSize], s.fse)
+ s.fse.Out = nil
+ if err != nil {
+ return s, nil, fmt.Errorf("fse decompress returned: %w", err)
+ }
+ if len(b) > 255 {
+ return s, nil, errors.New("corrupt input: output table too large")
+ }
+ s.symbolLen = uint16(len(b))
+ in = in[iSize:]
+ }
+
+ // collect weight stats
+ var rankStats [16]uint32
+ weightTotal := uint32(0)
+ for _, v := range s.huffWeight[:s.symbolLen] {
+ if v > tableLogMax {
+ return s, nil, errors.New("corrupt input: weight too large")
+ }
+ v2 := v & 15
+ rankStats[v2]++
+ // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0.
+ weightTotal += (1 << v2) >> 1
+ }
+ if weightTotal == 0 {
+ return s, nil, errors.New("corrupt input: weights zero")
+ }
+
+ // get last non-null symbol weight (implied, total must be 2^n)
+ {
+ tableLog := highBit32(weightTotal) + 1
+ if tableLog > tableLogMax {
+ return s, nil, errors.New("corrupt input: tableLog too big")
+ }
+ s.actualTableLog = uint8(tableLog)
+ // determine last weight
+ {
+ total := uint32(1) << tableLog
+ rest := total - weightTotal
+ verif := uint32(1) << highBit32(rest)
+ lastWeight := highBit32(rest) + 1
+ if verif != rest {
+ // last value must be a clean power of 2
+ return s, nil, errors.New("corrupt input: last value not power of two")
+ }
+ s.huffWeight[s.symbolLen] = uint8(lastWeight)
+ s.symbolLen++
+ rankStats[lastWeight]++
+ }
+ }
+
+ if (rankStats[1] < 2) || (rankStats[1]&1 != 0) {
+ // by construction : at least 2 elts of rank 1, must be even
+ return s, nil, errors.New("corrupt input: min elt size, even check failed ")
+ }
+
+ // TODO: Choose between single/double symbol decoding
+
+ // Calculate starting value for each rank
+ {
+ var nextRankStart uint32
+ for n := uint8(1); n < s.actualTableLog+1; n++ {
+ current := nextRankStart
+ nextRankStart += rankStats[n] << (n - 1)
+ rankStats[n] = current
+ }
+ }
+
+ // fill DTable (always full size)
+ tSize := 1 << tableLogMax
+ if len(s.dt.single) != tSize {
+ s.dt.single = make([]dEntrySingle, tSize)
+ }
+ cTable := s.prevTable
+ if cap(cTable) < maxSymbolValue+1 {
+ cTable = make([]cTableEntry, 0, maxSymbolValue+1)
+ }
+ cTable = cTable[:maxSymbolValue+1]
+ s.prevTable = cTable[:s.symbolLen]
+ s.prevTableLog = s.actualTableLog
+
+ for n, w := range s.huffWeight[:s.symbolLen] {
+ if w == 0 {
+ cTable[n] = cTableEntry{
+ val: 0,
+ nBits: 0,
+ }
+ continue
+ }
+ length := (uint32(1) << w) >> 1
+ d := dEntrySingle{
+ entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
+ }
+
+ rank := &rankStats[w]
+ cTable[n] = cTableEntry{
+ val: uint16(*rank >> (w - 1)),
+ nBits: uint8(d.entry),
+ }
+
+ single := s.dt.single[*rank : *rank+length]
+ for i := range single {
+ single[i] = d
+ }
+ *rank += length
+ }
+
+ return s, in, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
+ if cap(s.Out) < s.MaxDecodedSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
+ }
+ s.Out = s.Out[:0:s.MaxDecodedSize]
+ s.Out, err = s.Decoder().Decompress1X(s.Out, in)
+ return s.Out, err
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// Before this is called, the table must be initialized with ReadTable unless
+// the encoder re-used the table.
+// The length of the supplied input must match the end of a block exactly.
+// The destination size of the uncompressed data must be known and provided.
+// deprecated: Use the stateless Decoder() to get a concurrent version.
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
+ if dstSize > s.MaxDecodedSize {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ if cap(s.Out) < dstSize {
+ s.Out = make([]byte, s.MaxDecodedSize)
+ }
+ s.Out = s.Out[:0:dstSize]
+ s.Out, err = s.Decoder().Decompress4X(s.Out, in)
+ return s.Out, err
+}
+
+// Decoder will return a stateless decoder that can be used by multiple
+// decompressors concurrently.
+// Before this is called, the table must be initialized with ReadTable.
+// The Decoder is still linked to the scratch buffer so that cannot be reused.
+// However, it is safe to discard the scratch.
+func (s *Scratch) Decoder() *Decoder {
+ return &Decoder{
+ dt: s.dt,
+ actualTableLog: s.actualTableLog,
+ bufs: &s.decPool,
+ }
+}
+
+// Decoder provides stateless decoding.
+type Decoder struct {
+ dt dTable
+ actualTableLog uint8
+ bufs *sync.Pool
+}
+
+func (d *Decoder) buffer() *[4][256]byte {
+ buf, ok := d.bufs.Get().(*[4][256]byte)
+ if ok {
+ return buf
+ }
+ return &[4][256]byte{}
+}
+
+// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
+ if d.actualTableLog == 8 {
+ return d.decompress1X8BitExactly(dst, src)
+ }
+ var br bitReaderBytes
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ dt := d.dt.single[:256]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ switch d.actualTableLog {
+ case 8:
+ const shift = 0
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 7:
+ const shift = 8 - 7
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 6:
+ const shift = 8 - 6
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 5:
+ const shift = 8 - 5
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 4:
+ const shift = 8 - 4
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 3:
+ const shift = 8 - 3
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 2:
+ const shift = 8 - 2
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ case 1:
+ const shift = 8 - 1
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>(56+shift))]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+ default:
+ d.bufs.Put(bufs)
+ return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 4, so uint8 is fine
+ bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ shift := (8 - d.actualTableLog) & 7
+
+ for bitsLeft > 0 {
+ if br.bitsRead >= 64-8 {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := dt[br.peekByteFast()>>shift]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= int8(nBits)
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
+
+// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog == 8.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
+ var br bitReaderBytes
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ dt := d.dt.single[:256]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ const shift = 56
+
+ //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
+ for br.off >= 4 {
+ br.fillFast()
+ v := dt[uint8(br.value>>shift)]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>shift)]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>shift)]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[uint8(br.value>>shift)]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 4, so uint8 is fine
+ bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
+ for bitsLeft > 0 {
+ if br.bitsRead >= 64-8 {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := dt[br.peekByteFast()]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= int8(nBits)
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
+
+// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
+ if d.actualTableLog == 8 {
+ return d.decompress4X8bitExactly(dst, src)
+ }
+
+ var br [4]bitReaderBytes
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ shift := (56 + (8 - d.actualTableLog)) & 63
+
+ const tlSize = 1 << 8
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 4 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ // Interleave 2 decodes.
+ const stream = 0
+ const stream2 = 1
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ off += 4
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ if br.finished() {
+ d.bufs.Put(buf)
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[uint8(br.value>>shift)].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ d.bufs.Put(buf)
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// decompress4X8bitExactly will decompress a 4X encoded stream with tablelog == 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
+ var br [4]bitReaderBytes
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const shift = 56
+ const tlSize = 1 << 8
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 4 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ // Interleave 2 decodes.
+ const stream = 0
+ const stream2 = 1
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
+ }
+
+ off += 4
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ // copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ if br.finished() {
+ d.bufs.Put(buf)
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ d.bufs.Put(buf)
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if the table is OK.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+ if s == nil || len(s.dt.single) == 0 {
+ return
+ }
+ dt := s.dt.single[:1<<s.actualTableLog]
+ tablelog := s.actualTableLog
+ ok := 0
+ broken := 0
+ for sym, enc := range ct {
+ errs := 0
+ broken++
+ if enc.nBits == 0 {
+ for _, dec := range dt {
+ if uint8(dec.entry>>8) == byte(sym) {
+ fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+ errs++
+ break
+ }
+ }
+ if errs == 0 {
+ broken--
+ }
+ continue
+ }
+ // Unused bits in input
+ ub := tablelog - enc.nBits
+ top := enc.val << ub
+ // decoder looks at top bits.
+ dec := dt[top]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 0 {
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+ continue
+ }
+ // Ensure that all combinations are covered.
+ for i := uint16(0); i < (1 << ub); i++ {
+ vval := top | i
+ dec := dt[vval]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 20 {
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
+ break
+ }
+ }
+ if errs == 0 {
+ ok++
+ broken--
+ }
+ }
+ if broken > 0 {
+ fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+ }
+}
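
For reference, the 4X format decoded above splits the destination into four regions of dstEvery = (dstSize+3)/4 bytes, one per bit stream, and the last region absorbs the remainder. The sketch below only reproduces that layout calculation; streamRegions is a hypothetical helper, not part of huff0.

    package main

    import "fmt"

    // streamRegions mirrors the layout used by Decompress4X: the destination
    // is split into four consecutive regions of dstEvery bytes, one per bit
    // stream. The last region may be shorter when the size is not a multiple
    // of four.
    func streamRegions(dstSize int) [4][2]int {
        dstEvery := (dstSize + 3) / 4
        var r [4][2]int
        for i := 0; i < 4; i++ {
            start := dstEvery * i
            end := start + dstEvery
            if end > dstSize {
                end = dstSize
            }
            r[i] = [2]int{start, end}
        }
        return r
    }

    func main() {
        for _, reg := range streamRegions(10) {
            fmt.Println(reg) // [0 3] [3 6] [6 9] [9 10]
        }
    }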
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..ba7e8e6b0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8, decoding 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the size where using Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+ pbr *[4]bitReaderShifted
+ peekBits uint8
+ out *byte
+ dstEvery int
+ tbl *dEntrySingle
+ decoded int
+ limit *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ var decoded int
+
+ if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+ ctx := decompress4xContext{
+ pbr: &br,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ out: &out[0],
+ dstEvery: dstEvery,
+ tbl: &single[0],
+ limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
+ }
+ if use8BitTables {
+ decompress4x_8b_main_loop_amd64(&ctx)
+ } else {
+ decompress4x_main_loop_amd64(&ctx)
+ }
+
+ decoded = ctx.decoded
+ out = out[decoded/4:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 (BMI2) assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+ pbr *bitReaderShifted
+ peekBits uint8
+ out *byte
+ outCap int
+ tbl *dEntrySingle
+ decoded int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:maxDecodedSize]
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+
+ if maxDecodedSize >= 4 {
+ ctx := decompress1xContext{
+ pbr: &br,
+ out: &dst[0],
+ outCap: maxDecodedSize,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ tbl: &d.dt.single[0],
+ }
+
+ if cpuinfo.HasBMI2() {
+ decompress1x_main_loop_bmi2(&ctx)
+ } else {
+ decompress1x_main_loop_amd64(&ctx)
+ }
+ if ctx.decoded == error_max_decoded_size_exeeded {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+
+ dst = dst[:ctx.decoded]
+ }
+
+ // br.off < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
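
Assuming the exported huff0 entry points behave as documented upstream (Compress1X, ReadTable and Scratch.Decoder), a single-stream round trip through the decoder added here might look roughly like the sketch below; it is illustrative, not a vetted usage of this exact vendored revision.

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "strings"

        "github.com/klauspost/compress/huff0"
    )

    func main() {
        in := []byte(strings.Repeat("abacabadabacabae", 64))

        // Compress as a single Huffman stream. The output starts with the
        // table description, followed by the compressed payload.
        comp, _, err := huff0.Compress1X(in, nil)
        if err != nil {
            log.Fatal(err) // e.g. ErrIncompressible or ErrUseRLE for degenerate inputs
        }

        // Read the table back, then decode. Decompress1X uses cap(dst) as
        // the maximum allowed output size, so the caller must know the
        // uncompressed length out of band.
        s, data, err := huff0.ReadTable(comp, nil)
        if err != nil {
            log.Fatal(err)
        }
        out, err := s.Decoder().Decompress1X(make([]byte, 0, len(in)), data)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(in, out)) // true
    }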
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 000000000..c4c7ab2d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,830 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
+
+//go:build amd64 && !appengine && !noasm && gc
+
+// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_main_loop_amd64(SB), $0-8
+ // Preload values
+ MOVQ ctx+0(FP), AX
+ MOVBQZX 8(AX), DI
+ MOVQ 16(AX), BX
+ MOVQ 48(AX), SI
+ MOVQ 24(AX), R8
+ MOVQ 32(AX), R9
+ MOVQ (AX), R10
+
+ // Main loop
+main_loop:
+ XORL DX, DX
+ CMPQ BX, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill0
+ MOVQ 24(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ (R10), R13
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 24(R10)
+ ORQ R13, R11
+
+ // exhausted += (br0.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
+
+ // v1 := table[val1&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)
+
+ // update the bitreader structure
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
+
+ // br1.fillFast32()
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill1
+ MOVQ 72(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 48(R10), R13
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 72(R10)
+ ORQ R13, R11
+
+ // exhausted += (br1.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
+
+ // v1 := table[val1&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)(R8*1)
+
+ // update the bitreader structure
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
+
+ // br2.fillFast32()
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill2
+ MOVQ 120(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 96(R10), R13
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 120(R10)
+ ORQ R13, R11
+
+ // exhausted += (br2.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
+
+ // v1 := table[val1&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)(R8*2)
+
+ // update the bitreader structure
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
+
+ // br3.fillFast32()
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill3
+ MOVQ 168(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 144(R10), R13
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 168(R10)
+ ORQ R13, R11
+
+ // exhausted += (br3.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
+
+ // v1 := table[val1&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ LEAQ (R8)(R8*2), CX
+ MOVW AX, (BX)(CX*1)
+
+ // update the bitreader structure
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
+ ADDQ $0x02, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
+
+// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
+ // Preload values
+ MOVQ ctx+0(FP), CX
+ MOVBQZX 8(CX), DI
+ MOVQ 16(CX), BX
+ MOVQ 48(CX), SI
+ MOVQ 24(CX), R8
+ MOVQ 32(CX), R9
+ MOVQ (CX), R10
+
+ // Main loop
+main_loop:
+ XORL DX, DX
+ CMPQ BX, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill0
+ MOVQ 24(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ (R10), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 24(R10)
+ ORQ R14, R11
+
+ // exhausted += (br0.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)
+
+ // update the bitreader structure
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
+
+ // br1.fillFast32()
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill1
+ MOVQ 72(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 48(R10), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 72(R10)
+ ORQ R14, R11
+
+ // exhausted += (br1.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)(R8*1)
+
+ // update the bitreader structure
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
+
+ // br2.fillFast32()
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill2
+ MOVQ 120(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 96(R10), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 120(R10)
+ ORQ R14, R11
+
+ // exhausted += (br2.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)(R8*2)
+
+ // update the bitreader structure
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
+
+ // br3.fillFast32()
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill3
+ MOVQ 168(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 144(R10), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 168(R10)
+ ORQ R14, R11
+
+ // exhausted += (br3.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ LEAQ (R8)(R8*2), CX
+ MOVL AX, (BX)(CX*1)
+
+ // update the bitreader structure
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
+ ADDQ $0x04, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exceeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exceeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_1_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_2_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exceeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+// Requires: BMI2
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exceeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exceeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_1_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_2_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exceeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
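
The assembly loops above keep peekBits = (64 - actualTableLog) & 63 in a register and index the decode table with value >> peekBits. A minimal pure-Go sketch of that top-bits lookup follows; peekTopBits is a hypothetical name, not an exported function.

    package main

    import "fmt"

    // peekTopBits mirrors the lookup the assembly performs: shifting the
    // 64-bit bit buffer right by 64-tableLog leaves exactly tableLog bits,
    // which index the decode table.
    func peekTopBits(value uint64, tableLog uint8) uint16 {
        peekBits := uint8((64 - tableLog) & 63)
        return uint16(value >> peekBits)
    }

    func main() {
        // With tableLog=11 the top 11 bits of the buffer select the entry.
        v := uint64(0xABCD_0000_0000_0000)
        fmt.Printf("%#x\n", peekTopBits(v, 11)) // 0x55e
    }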
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 000000000..908c17de6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,299 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains generic implementations of Decoder.Decompress4X and Decoder.Decompress1X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ dt := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ for br.off >= 8 {
+ br.fillFast()
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
+ br.fillFast()
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br.off < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
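
Both Decompress4X implementations require cap(dst) to equal the uncompressed size and src to cover exactly one block. Assuming the exported Compress4X and ReadTable behave as documented upstream, a four-stream round trip might look like the following sketch:

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "strings"

        "github.com/klauspost/compress/huff0"
    )

    func main() {
        in := []byte(strings.Repeat("node-problem-detector ", 256))

        // Compress into four interleaved streams plus a 6-byte jump table.
        comp, _, err := huff0.Compress4X(in, nil)
        if err != nil {
            log.Fatal(err)
        }

        // Decompress4X needs a dst whose capacity equals the uncompressed
        // size, so that size has to be tracked out of band by the caller.
        s, data, err := huff0.ReadTable(comp, nil)
        if err != nil {
            log.Fatal(err)
        }
        out, err := s.Decoder().Decompress4X(make([]byte, 0, len(in)), data)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(in, out)) // true
    }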
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 000000000..77ecd68e0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,337 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "sync"
+
+ "github.com/klauspost/compress/fse"
+)
+
+const (
+ maxSymbolValue = 255
+
+ // zstandard limits tablelog to 11, see:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+ tableLogMax = 11
+ tableLogDefault = 11
+ minTablelog = 5
+ huffNodesLen = 512
+
+ // BlockSizeMax is maximum input size for a single block uncompressed.
+ BlockSizeMax = 1<<18 - 1
+)
+
+var (
+ // ErrIncompressible is returned when input is judged to be too hard to compress.
+ ErrIncompressible = errors.New("input is not compressible")
+
+ // ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+ ErrUseRLE = errors.New("input is single value repeated")
+
+ // ErrTooBig is returned if the input is too large for a single block.
+ ErrTooBig = errors.New("input too big")
+
+ // ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the configured maximum size.
+ ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+type ReusePolicy uint8
+
+const (
+ // ReusePolicyAllow will allow reuse if it produces smaller output.
+ ReusePolicyAllow ReusePolicy = iota
+
+ // ReusePolicyPrefer will re-use aggressively if possible.
+ // This will not check if a new table will produce smaller output,
+ // except if the current table is impossible to use or
+ // compressed output is bigger than input.
+ ReusePolicyPrefer
+
+ // ReusePolicyNone will disable re-use of tables.
+ // This is slightly faster than ReusePolicyAllow but may produce larger output.
+ ReusePolicyNone
+
+ // ReusePolicyMust must allow reuse and produce smaller output.
+ ReusePolicyMust
+)
+
+type Scratch struct {
+ count [maxSymbolValue + 1]uint32
+
+ // Per block parameters.
+ // These can be used to override compression parameters of the block.
+ // Do not touch, unless you know what you are doing.
+
+ // Out is output buffer.
+ // If the scratch is re-used before the caller is done processing the output,
+ // set this field to nil.
+ // Otherwise the output buffer will be re-used for the next compression/decompression step
+ // and allocation will be avoided.
+ Out []byte
+
+ // OutTable will contain the table data only, if a new table has been generated.
+ // Slice of the returned data.
+ OutTable []byte
+
+ // OutData will contain the compressed data.
+ // Slice of the returned data.
+ OutData []byte
+
+ // MaxDecodedSize will set the maximum allowed output size.
+ // This value will automatically be set to BlockSizeMax if not set.
+ // Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+ MaxDecodedSize int
+
+ srcLen int
+
+ // MaxSymbolValue will override the maximum symbol value of the next block.
+ MaxSymbolValue uint8
+
+ // TableLog will attempt to override the tablelog for the next block.
+ // Must be <= 11 and >= 5.
+ TableLog uint8
+
+ // Reuse will specify the reuse policy
+ Reuse ReusePolicy
+
+ // WantLogLess allows specifying a log2 reduction that should at least be achieved,
+ // otherwise the block will be returned as incompressible.
+ // The reduction should then at least be (input size >> WantLogLess)
+ // If WantLogLess == 0 any improvement will do.
+ WantLogLess uint8
+
+ symbolLen uint16 // Length of active part of the symbol table.
+ maxCount int // count of the most probable symbol
+ clearCount bool // clear count
+ actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table
+ prevTable cTable // Table used for previous compression.
+ cTable cTable // compression table
+ dt dTable // decompression table
+ nodes []nodeElt
+ tmpOut [4][]byte
+ fse *fse.Scratch
+ decPool sync.Pool // *[4][256]byte buffers.
+ huffWeight [maxSymbolValue + 1]byte
+}
+
+// TransferCTable will transfer the previously used compression table.
+func (s *Scratch) TransferCTable(src *Scratch) {
+ if cap(s.prevTable) < len(src.prevTable) {
+ s.prevTable = make(cTable, 0, maxSymbolValue+1)
+ }
+ s.prevTable = s.prevTable[:len(src.prevTable)]
+ copy(s.prevTable, src.prevTable)
+ s.prevTableLog = src.prevTableLog
+}
+
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+ if len(in) > BlockSizeMax {
+ return nil, ErrTooBig
+ }
+ if s == nil {
+ s = &Scratch{}
+ }
+ if s.MaxSymbolValue == 0 {
+ s.MaxSymbolValue = maxSymbolValue
+ }
+ if s.TableLog == 0 {
+ s.TableLog = tableLogDefault
+ }
+ if s.TableLog > tableLogMax || s.TableLog < minTablelog {
+ return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
+ }
+ if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+ s.MaxDecodedSize = BlockSizeMax
+ }
+ if s.clearCount && s.maxCount == 0 {
+ for i := range s.count {
+ s.count[i] = 0
+ }
+ s.clearCount = false
+ }
+ if cap(s.Out) == 0 {
+ s.Out = make([]byte, 0, len(in))
+ }
+ s.Out = s.Out[:0]
+
+ s.OutTable = nil
+ s.OutData = nil
+ if cap(s.nodes) < huffNodesLen+1 {
+ s.nodes = make([]nodeElt, 0, huffNodesLen+1)
+ }
+ s.nodes = s.nodes[:0]
+ if s.fse == nil {
+ s.fse = &fse.Scratch{}
+ }
+ s.srcLen = len(in)
+
+ return s, nil
+}
+
+type cTable []cTableEntry
+
+func (c cTable) write(s *Scratch) error {
+ var (
+ // precomputed conversion table
+ bitsToWeight [tableLogMax + 1]byte
+ huffLog = s.actualTableLog
+ // last weight is not saved.
+ maxSymbolValue = uint8(s.symbolLen - 1)
+ huffWeight = s.huffWeight[:256]
+ )
+ const (
+ maxFSETableLog = 6
+ )
+ // convert to weight
+ bitsToWeight[0] = 0
+ for n := uint8(1); n < huffLog+1; n++ {
+ bitsToWeight[n] = huffLog + 1 - n
+ }
+
+ // Acquire histogram for FSE.
+ hist := s.fse.Histogram()
+ hist = hist[:256]
+ for i := range hist[:16] {
+ hist[i] = 0
+ }
+ for n := uint8(0); n < maxSymbolValue; n++ {
+ v := bitsToWeight[c[n].nBits] & 15
+ huffWeight[n] = v
+ hist[v]++
+ }
+
+ // FSE compress if feasible.
+ if maxSymbolValue >= 2 {
+ huffMaxCnt := uint32(0)
+ huffMax := uint8(0)
+ for i, v := range hist[:16] {
+ if v == 0 {
+ continue
+ }
+ huffMax = byte(i)
+ if v > huffMaxCnt {
+ huffMaxCnt = v
+ }
+ }
+ s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+ s.fse.TableLog = maxFSETableLog
+ b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+ if err == nil && len(b) < int(s.symbolLen>>1) {
+ s.Out = append(s.Out, uint8(len(b)))
+ s.Out = append(s.Out, b...)
+ return nil
+ }
+ // Unable to compress (RLE/uncompressible)
+ }
+ // write raw values as 4-bits (max : 15)
+ if maxSymbolValue > (256 - 128) {
+ // should not happen : likely means source cannot be compressed
+ return ErrIncompressible
+ }
+ op := s.Out
+ // special case, pack weights 4 bits/weight.
+ op = append(op, 128|(maxSymbolValue-1))
+ // be sure it doesn't cause msan issue in final combination
+ huffWeight[maxSymbolValue] = 0
+ for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
+ op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
+ }
+ s.Out = op
+ return nil
+}
+
+func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
+ var (
+ // precomputed conversion table
+ bitsToWeight [tableLogMax + 1]byte
+ huffLog = s.actualTableLog
+ // last weight is not saved.
+ maxSymbolValue = uint8(s.symbolLen - 1)
+ huffWeight = s.huffWeight[:256]
+ )
+ const (
+ maxFSETableLog = 6
+ )
+ // convert to weight
+ bitsToWeight[0] = 0
+ for n := uint8(1); n < huffLog+1; n++ {
+ bitsToWeight[n] = huffLog + 1 - n
+ }
+
+ // Acquire histogram for FSE.
+ hist := s.fse.Histogram()
+ hist = hist[:256]
+ for i := range hist[:16] {
+ hist[i] = 0
+ }
+ for n := uint8(0); n < maxSymbolValue; n++ {
+ v := bitsToWeight[c[n].nBits] & 15
+ huffWeight[n] = v
+ hist[v]++
+ }
+
+ // FSE compress if feasible.
+ if maxSymbolValue >= 2 {
+ huffMaxCnt := uint32(0)
+ huffMax := uint8(0)
+ for i, v := range hist[:16] {
+ if v == 0 {
+ continue
+ }
+ huffMax = byte(i)
+ if v > huffMaxCnt {
+ huffMaxCnt = v
+ }
+ }
+ s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
+ s.fse.TableLog = maxFSETableLog
+ b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
+ if err == nil && len(b) < int(s.symbolLen>>1) {
+ sz += 1 + len(b)
+ return sz, nil
+ }
+ // Unable to compress (RLE/uncompressible)
+ }
+ // write raw values as 4-bits (max : 15)
+ if maxSymbolValue > (256 - 128) {
+ // should not happen : likely means source cannot be compressed
+ return 0, ErrIncompressible
+ }
+ // special case, pack weights 4 bits/weight.
+ sz += 1 + int(maxSymbolValue/2)
+ return sz, nil
+}
+
+// estimateSize returns the estimated size in bytes of the input represented in the
+// histogram supplied.
+func (c cTable) estimateSize(hist []uint32) int {
+ nbBits := uint32(7)
+ for i, v := range c[:len(hist)] {
+ nbBits += uint32(v.nBits) * hist[i]
+ }
+ return int(nbBits >> 3)
+}
+
+// minSize returns the minimum possible size considering the shannon limit.
+func (s *Scratch) minSize(total int) int {
+ nbBits := float64(7)
+ fTotal := float64(total)
+ for _, v := range s.count[:s.symbolLen] {
+ n := float64(v)
+ if n > 0 {
+ nbBits += math.Log2(fTotal/n) * n
+ }
+ }
+ return int(nbBits) >> 3
+}
+
+func highBit32(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
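
minSize above is the Shannon lower bound over the active part of the histogram. A standalone sketch of the same arithmetic follows; shannonMinBytes is a hypothetical helper, not part of huff0.

    package main

    import (
        "fmt"
        "math"
    )

    // shannonMinBytes mirrors Scratch.minSize: the theoretical lower bound,
    // in bytes, for encoding the symbols described by a histogram.
    func shannonMinBytes(hist []uint32) int {
        total := 0
        for _, v := range hist {
            total += int(v)
        }
        nbBits := float64(7) // rounding slack, as in minSize
        fTotal := float64(total)
        for _, v := range hist {
            if v > 0 {
                n := float64(v)
                nbBits += math.Log2(fTotal/n) * n
            }
        }
        return int(nbBits) >> 3
    }

    func main() {
        // Two symbols with a 3:1 skew, 400 symbols total:
        // entropy is about 0.81 bits/symbol, so roughly 41 bytes.
        fmt.Println(shannonMinBytes([]uint32{300, 100})) // 41
    }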
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 000000000..3954c5121
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution, check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+ return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+ return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call the returned function to restore the previous state.
+func DisableBMI2() func() {
+ old := hasBMI2
+ hasBMI2 = false
+ return func() {
+ hasBMI2 = old
+ }
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+ return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
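
The amd64 decoder selects its BMI2 loop through cpuinfo.HasBMI2, and DisableBMI2 exists so tests can force the baseline path. Since the package is internal it can only be imported from inside the compress module; a test there might use it roughly like this sketch:

    package huff0

    import (
        "testing"

        "github.com/klauspost/compress/internal/cpuinfo"
    )

    // TestDecompressWithoutBMI2 sketches how the BMI2 fast path can be
    // switched off so the baseline amd64 loop is exercised as well.
    func TestDecompressWithoutBMI2(t *testing.T) {
        if !cpuinfo.HasBMI2() {
            t.Skip("BMI2 not available; baseline path already in use")
        }
        restore := cpuinfo.DisableBMI2()
        defer restore()

        // ... run Decompress1X round trips here; decompress1x_main_loop_amd64
        // is now selected instead of decompress1x_main_loop_bmi2.
    }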
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 000000000..e802579c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+// go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+ hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 000000000..4465fbe9e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+ // 1. determine max EAX value
+ XORQ AX, AX
+ CPUID
+
+ CMPQ AX, $7
+ JB unsupported
+
+ // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+ MOVQ $7, AX
+ MOVQ $0, CX
+ CPUID
+
+ BTQ $3, BX // bit 3 = BMI1
+ SETCS AL
+
+ BTQ $8, BX // bit 8 = BMI2
+ SETCS AH
+
+ MOVB AL, bmi1+0(FP)
+ MOVB AH, bmi2+1(FP)
+ RET
+
+unsupported:
+ XORQ AX, AX
+ MOVB AL, bmi1+0(FP)
+ MOVB AL, bmi2+1(FP)
+ RET
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
similarity index 92%
rename from vendor/github.com/imdario/mergo/LICENSE
rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE
index 686680298..6050c10f4 100644
--- a/vendor/github.com/imdario/mergo/LICENSE
+++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
@@ -1,5 +1,4 @@
-Copyright (c) 2013 Dario Castañé. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
new file mode 100644
index 000000000..40796a49d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
@@ -0,0 +1,264 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Decode handles the Snappy block format, not the Snappy stream format.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+//
+// Reader handles the Snappy stream format, not the Snappy block format.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+func (r *Reader) fill() error {
+ for r.i >= r.j {
+ if !r.readFull(r.buf[:4], true) {
+ return r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return r.err
+ }
+ }
+
+ return nil
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+
+ if err := r.fill(); err != nil {
+ return 0, err
+ }
+
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+
+ if err := r.fill(); err != nil {
+ return 0, err
+ }
+
+ c := r.decoded[r.i]
+ r.i++
+ return c, nil
+}
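
snapref mirrors the reference Snappy block-format API (Encode, Decode, DecodedLen). A round-trip sketch follows; the package is internal to the compress module, so code outside it would use the equivalent github.com/golang/snappy API instead.

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/klauspost/compress/internal/snapref"
    )

    func main() {
        src := []byte("snappy block round trip, snappy block round trip")

        // Encode produces the block format: a uvarint decoded length
        // followed by literal/copy elements.
        enc := snapref.Encode(nil, src)

        // DecodedLen reads back that length so the caller can size dst.
        n, err := snapref.DecodedLen(enc)
        if err != nil {
            log.Fatal(err)
        }
        out, err := snapref.Decode(make([]byte, n), enc)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(src, out), n == len(src)) // true true
    }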
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
new file mode 100644
index 000000000..77395a6b8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
@@ -0,0 +1,113 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset >= length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
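
The forward byte-by-byte copy at the end of decode is what lets a back-reference with offset < length behave as run-length expansion. A tiny sketch of that behaviour; forwardCopy is a hypothetical helper mirroring the loop above.

    package main

    import "fmt"

    // forwardCopy copies forwards even when source and destination overlap,
    // matching the semantics the decoder relies on when offset < length.
    func forwardCopy(dst []byte, d, offset, length int) {
        a := dst[d : d+length]
        b := dst[d-offset:]
        b = b[:len(a)]
        for i := range a {
            a[i] = b[i]
        }
    }

    func main() {
        // "ab" followed by a copy of length 6 at offset 2 repeats the pair.
        buf := []byte("ab______")
        forwardCopy(buf, 2, 2, 6)
        fmt.Println(string(buf)) // abababab
    }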
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
new file mode 100644
index 000000000..13c6040a5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
@@ -0,0 +1,289 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// Encode handles the Snappy block format, not the Snappy stream format.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+//
+// Writer handles the Snappy stream format, not the Snappy block format.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
new file mode 100644
index 000000000..2754bac6f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -0,0 +1,250 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+ if MaxEncodedLen(len(src)) > len(dst) {
+ return 0
+ }
+
+ // encodeBlock breaks on too big blocks, so split.
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return d
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
new file mode 100644
index 000000000..34d01f4aa
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snapref implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snapref
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
new file mode 100644
index 000000000..5a4412f90
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -0,0 +1,4 @@
+module github.com/klauspost/compress
+
+go 1.19
+
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
new file mode 100644
index 000000000..92e2347bb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -0,0 +1,441 @@
+# zstd
+
+[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder.
+A high-performance compression algorithm is implemented, currently focused on speed.
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[Go Reference](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+For smaller encodes, using the EncodeAll function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do like this:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+ enc, err := zstd.NewWriter(out)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(enc, in)
+ if err != nil {
+ enc.Close()
+ return err
+ }
+ return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
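+
+As a rough sketch of the above, an encoder created once with explicit options can be pointed at new outputs with
+`Reset` and finished with `Close` for each stream. The `inputs`/`outputs` slices below are placeholders for this
+sketch, not part of the package API:
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// compressMany reuses a single encoder for several independent streams.
+func compressMany(inputs []io.Reader, outputs []io.Writer) error {
+	enc, err := zstd.NewWriter(nil,
+		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
+		zstd.WithEncoderConcurrency(2))
+	if err != nil {
+		return err
+	}
+	for i, in := range inputs {
+		enc.Reset(outputs[i]) // point the same encoder at the next output
+		if _, err := io.Copy(enc, in); err != nil {
+			enc.Close()
+			return err
+		}
+		if err := enc.Close(); err != nil { // flush and finish this stream
+			return err
+		}
+	}
+	return nil
+}
+```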
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Reader.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+ return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file out level insize outsize millis mb/s
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
+
+cgo zstd:
+silesia.tar zstd 1 211947520 73605392 543 371.56
+silesia.tar zstd 3 211947520 66793289 864 233.68
+silesia.tar zstd 6 211947520 62916450 1913 105.66
+silesia.tar zstd 9 211947520 60212393 5063 39.92
+
+gzip, stdlib/this package:
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file out level insize outsize millis mb/s
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
+
+gob-stream zstd 1 1911399616 249810424 2637 691.26
+gob-stream zstd 3 1911399616 208192146 3490 522.31
+gob-stream zstd 6 1911399616 193632038 6687 272.56
+gob-stream zstd 9 1911399616 177620386 16175 112.70
+
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file out level insize outsize millis mb/s
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
+
+enwik9 zstd 1 1000000000 358072021 3110 306.65
+enwik9 zstd 3 1000000000 313734672 4784 199.35
+enwik9 zstd 6 1000000000 295138875 10290 92.68
+enwik9 zstd 9 1000000000 278348700 28549 33.40
+
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
+
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
+
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
+
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+ d, err := zstd.NewReader(in)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+
+ // Copy content...
+ _, err = io.Copy(out, d)
+ return err
+}
+```
+
+When running with default settings, it is important to call the "Close" function when you no longer need the Reader,
+so that its running goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+ return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+Dictionaries will be used automatically for the data that specifies them.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time, so be sure to measure performance when adopting them.
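+
+A minimal sketch of wiring one dictionary into both directions could look like this. The `dict` bytes are assumed
+to be the raw content of a dictionary produced by `zstd --train` (for example read with `os.ReadFile`); the helper
+name is a placeholder for this sketch:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// newCodecsWithDict returns an encoder and decoder that share one dictionary.
+func newCodecsWithDict(dict []byte) (*zstd.Encoder, *zstd.Decoder, error) {
+	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
+	if err != nil {
+		return nil, nil, err
+	}
+	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
+	if err != nil {
+		dec.Close()
+		return nil, nil, err
+	}
+	return enc, dec, nil
+}
+```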
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` method to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
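+
+For example, a buffer decode with a caller-supplied size estimate might look like this. `expectedSize` is a
+placeholder for whatever estimate you have; when it is large enough, `DecodeAll` should not need to grow the
+destination:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// decodeInto decodes src using a long-lived decoder into a pre-sized destination.
+func decodeInto(decoder *zstd.Decoder, src []byte, expectedSize int) ([]byte, error) {
+	dst := make([]byte, 0, expectedSize)
+	return decoder.DecodeAll(src, dst)
+}
+```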
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Reads input and splits the input into blocks.
+2) Decompression of literals.
+3) Decompression of sequences.
+4) Reconstruction of output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the last are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2022, but this may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
+to use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
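+
+A minimal sketch, closely following the linked example (the entry name and payload are placeholders):
+
+```Go
+import (
+	"archive/zip"
+	"bytes"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// writeZstdZip writes one zstd-compressed entry into an in-memory zip archive.
+func writeZstdZip(payload []byte) ([]byte, error) {
+	var buf bytes.Buffer
+	zw := zip.NewWriter(&buf)
+	// Register on this writer only, not globally.
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: "data.bin", Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		return nil, err
+	}
+	if _, err := w.Write(payload); err != nil {
+		return nil, err
+	}
+	if err := zw.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+```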
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests and for performance enhancements include benchmarks.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 000000000..25ca98394
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+ in []byte
+ value uint64 // Maybe use [16]byte, but shifting is awkward.
+ bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+ if len(in) < 1 {
+ return errors.New("corrupt stream: too short")
+ }
+ b.in = in
+ // The highest bit of the last byte indicates where to start
+ v := in[len(in)-1]
+ if v == 0 {
+ return errors.New("corrupt stream, did not find end of stream")
+ }
+ b.bitsRead = 64
+ b.value = 0
+ if len(in) >= 8 {
+ b.fillFastStart()
+ } else {
+ b.fill()
+ b.fill()
+ }
+ b.bitsRead += 8 - uint8(highBits(uint32(v)))
+ return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int {
+ if n == 0 /*|| b.bitsRead >= 64 */ {
+ return 0
+ }
+ return int(b.get32BitsFast(n))
+}
+
+// get32BitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
+ const regMask = 64 - 1
+ v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+ if b.bitsRead < 32 {
+ return
+ }
+ v := b.in[len(b.in)-4:]
+ b.in = b.in[:len(b.in)-4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+ v := b.in[len(b.in)-8:]
+ b.in = b.in[:len(b.in)-8]
+ b.value = binary.LittleEndian.Uint64(v)
+ b.bitsRead = 0
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+ if b.bitsRead < 32 {
+ return
+ }
+ if len(b.in) >= 4 {
+ v := b.in[len(b.in)-4:]
+ b.in = b.in[:len(b.in)-4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ b.value = (b.value << 32) | uint64(low)
+ b.bitsRead -= 32
+ return
+ }
+
+ b.bitsRead -= uint8(8 * len(b.in))
+ for len(b.in) > 0 {
+ b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+ b.in = b.in[:len(b.in)-1]
+ }
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+ return len(b.in) == 0 && b.bitsRead >= 64
+}
+
+// overread returns true if more bits have been requested than is on the stream.
+func (b *bitReader) overread() bool {
+ return b.bitsRead > 64
+}
+
+// remain returns the number of bits remaining.
+func (b *bitReader) remain() uint {
+ return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+}
+
+// close closes the bitstream and returns an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+ // Release reference.
+ b.in = nil
+ if !b.finished() {
+ return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+ }
+ if b.bitsRead > 64 {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func highBits(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
new file mode 100644
index 000000000..1952f175b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package zstd
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+ bitContainer uint64
+ nBits uint8
+ out []byte
+}
+
+// bitMask16 is a table of bitmasks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+ 0xFFFF, 0xFFFF} /* up to 16 bits */
+
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits32NC will add up to 31 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
+ if bits <= 31 {
+ b.addBits32Clean(uint32(value), bits)
+ return
+ }
+ b.addBits32Clean(uint32(value), 32)
+ b.flush32()
+ b.addBits32Clean(uint32(value>>32), bits-32)
+}
+
+// addBits32Clean will add up to 32 bits.
+// It will not check if there is space for them.
+// The input must not contain more bits than specified.
+func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+ if b.nBits < 32 {
+ return
+ }
+ b.out = append(b.out,
+ byte(b.bitContainer),
+ byte(b.bitContainer>>8),
+ byte(b.bitContainer>>16),
+ byte(b.bitContainer>>24))
+ b.nBits -= 32
+ b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+ nbBytes := (b.nBits + 7) >> 3
+ for i := uint8(0); i < nbBytes; i++ {
+ b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+ }
+ b.nBits = 0
+ b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() {
+ // End mark
+ b.addBits16Clean(1, 1)
+ // flush until next byte.
+ b.flushAlign()
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+ b.bitContainer = 0
+ b.nBits = 0
+ b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
new file mode 100644
index 000000000..03744fbc7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -0,0 +1,729 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/klauspost/compress/huff0"
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type blockType uint8
+
+//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex
+
+const (
+ blockTypeRaw blockType = iota
+ blockTypeRLE
+ blockTypeCompressed
+ blockTypeReserved
+)
+
+type literalsBlockType uint8
+
+const (
+ literalsBlockRaw literalsBlockType = iota
+ literalsBlockRLE
+ literalsBlockCompressed
+ literalsBlockTreeless
+)
+
+const (
+ // maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
+ maxCompressedBlockSize = 128 << 10
+
+ compressedBlockOverAlloc = 16
+ maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
+ // Maximum possible block size (all Raw+Uncompressed).
+ maxBlockSize = (1 << 21) - 1
+
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
+
+ // We support slightly less than the reference decoder to be able to
+ // use ints on 32 bit archs.
+ maxOffsetBits = 30
+)
+
+var (
+ huffDecoderPool = sync.Pool{New: func() interface{} {
+ return &huff0.Scratch{}
+ }}
+
+ fseDecoderPool = sync.Pool{New: func() interface{} {
+ return &fseDecoder{}
+ }}
+)
+
+type blockDec struct {
+ // Raw source data of the block.
+ data []byte
+ dataStorage []byte
+
+ // Destination of the decoded data.
+ dst []byte
+
+ // Buffer for literals data.
+ literalBuf []byte
+
+ // Window size of the block.
+ WindowSize uint64
+
+ err error
+
+ // Check against this crc, if hasCRC is true.
+ checkCRC uint32
+ hasCRC bool
+
+ // Frame to use for singlethreaded decoding.
+ // Should not be used by the decoder itself since parent may be another frame.
+ localFrame *frameDec
+
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
+ // Block is RLE, this is the size.
+ RLESize uint32
+
+ Type blockType
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Use less memory
+ lowMem bool
+}
+
+func (b *blockDec) String() string {
+ if b == nil {
+ return ""
+ }
+	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+ b := blockDec{
+ lowMem: lowMem,
+ }
+ return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+ b.WindowSize = windowSize
+ tmp, err := br.readSmall(3)
+ if err != nil {
+ println("Reading block header:", err)
+ return err
+ }
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ b.Last = bh&1 != 0
+ b.Type = blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ maxSize := maxCompressedBlockSizeAlloc
+ switch b.Type {
+ case blockTypeReserved:
+ return ErrReservedBlockType
+ case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+ b.RLESize = uint32(cSize)
+ if b.lowMem {
+ maxSize = cSize
+ }
+ cSize = 1
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("Data size on stream:", cSize)
+ }
+ b.RLESize = 0
+ maxSize = maxCompressedBlockSizeAlloc
+ if windowSize < maxCompressedBlockSize && b.lowMem {
+ maxSize = int(windowSize) + compressedBlockOverAlloc
+ }
+ if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+ if debugDecoder {
+ printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrCompressedSizeTooBig
+ }
+ // Empty compressed blocks must at least be 2 bytes
+ // for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
+ case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
+ b.RLESize = 0
+ // We do not need a destination for raw blocks.
+ maxSize = -1
+ default:
+ panic("Invalid block type")
+ }
+
+ // Read block data.
+ if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+ // byteBuf doesn't need a destination buffer.
+ if b.lowMem || cSize > maxCompressedBlockSize {
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
+ } else {
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
+ }
+ }
+ b.data, err = br.readBig(cSize, b.dataStorage)
+ if err != nil {
+ if debugDecoder {
+ println("Reading block:", err, "(", cSize, ")", len(b.data))
+ printf("%T", br)
+ }
+ return err
+ }
+ if cap(b.dst) <= maxSize {
+ b.dst = make([]byte, 0, maxSize+1)
+ }
+ return nil
+}
+
+// sendErr will make the decoder return err for the remainder of this frame.
+func (b *blockDec) sendErr(err error) {
+ b.Last = true
+ b.Type = blockTypeReserved
+ b.err = err
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
+func (b *blockDec) Close() {
+}
+
+// decodeBuf decodes the block, appending the decoded output to the history.
+func (b *blockDec) decodeBuf(hist *history) error {
+ switch b.Type {
+ case blockTypeRLE:
+ if cap(b.dst) < int(b.RLESize) {
+ if b.lowMem {
+ b.dst = make([]byte, b.RLESize)
+ } else {
+ b.dst = make([]byte, maxCompressedBlockSize)
+ }
+ }
+ b.dst = b.dst[:b.RLESize]
+ v := b.data[0]
+ for i := range b.dst {
+ b.dst[i] = v
+ }
+ hist.appendKeep(b.dst)
+ return nil
+ case blockTypeRaw:
+ hist.appendKeep(b.data)
+ return nil
+ case blockTypeCompressed:
+ saved := b.dst
+ // Append directly to history
+ if hist.ignoreBuffer == 0 {
+ b.dst = hist.b
+ hist.b = nil
+ } else {
+ b.dst = b.dst[:0]
+ }
+ err := b.decodeCompressed(hist)
+ if debugDecoder {
+ println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
+ }
+ if hist.ignoreBuffer == 0 {
+ hist.b = b.dst
+ b.dst = saved
+ } else {
+ hist.appendKeep(b.dst)
+ }
+ return err
+ case blockTypeReserved:
+ // Used for returning errors.
+ return b.err
+ default:
+ panic("Invalid block type")
+ }
+}
+
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
+ // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
+ if len(in) < 2 {
+ return in, ErrBlockTooSmall
+ }
+
+ litType := literalsBlockType(in[0] & 3)
+ var litRegenSize int
+ var litCompSize int
+ sizeFormat := (in[0] >> 2) & 3
+ var fourStreams bool
+ var literals []byte
+ switch litType {
+ case literalsBlockRaw, literalsBlockRLE:
+ switch sizeFormat {
+ case 0, 2:
+ // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
+ litRegenSize = int(in[0] >> 3)
+ in = in[1:]
+ case 1:
+ // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
+ litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
+ in = in[2:]
+ case 3:
+ // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
+ if len(in) < 3 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return in, ErrBlockTooSmall
+ }
+ litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
+ in = in[3:]
+ }
+ case literalsBlockCompressed, literalsBlockTreeless:
+ switch sizeFormat {
+ case 0, 1:
+ // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
+ if len(in) < 3 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return in, ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
+ litRegenSize = int(n & 1023)
+ litCompSize = int(n >> 10)
+ fourStreams = sizeFormat == 1
+ in = in[3:]
+ case 2:
+ fourStreams = true
+ if len(in) < 4 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return in, ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
+ litRegenSize = int(n & 16383)
+ litCompSize = int(n >> 14)
+ in = in[4:]
+ case 3:
+ fourStreams = true
+ if len(in) < 5 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
+ return in, ErrBlockTooSmall
+ }
+ n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
+ litRegenSize = int(n & 262143)
+ litCompSize = int(n >> 18)
+ in = in[5:]
+ }
+ }
+ if debugDecoder {
+ println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
+ }
+ if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+ return in, ErrWindowSizeExceeded
+ }
+
+ switch litType {
+ case literalsBlockRaw:
+ if len(in) < litRegenSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
+ return in, ErrBlockTooSmall
+ }
+ literals = in[:litRegenSize]
+ in = in[litRegenSize:]
+ //printf("Found %d uncompressed literals\n", litRegenSize)
+ case literalsBlockRLE:
+ if len(in) < 1 {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
+ return in, ErrBlockTooSmall
+ }
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
+ } else {
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
+ }
+ }
+ literals = b.literalBuf[:litRegenSize]
+ v := in[0]
+ for i := range literals {
+ literals[i] = v
+ }
+ in = in[1:]
+ if debugDecoder {
+ printf("Found %d RLE compressed literals\n", litRegenSize)
+ }
+ case literalsBlockTreeless:
+ if len(in) < litCompSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+ return in, ErrBlockTooSmall
+ }
+ // Store compressed literals, so we defer decoding until we get history.
+ literals = in[:litCompSize]
+ in = in[litCompSize:]
+ if debugDecoder {
+ printf("Found %d compressed literals\n", litCompSize)
+ }
+ huff := hist.huffTree
+ if huff == nil {
+ return in, errors.New("literal block was treeless, but no history was defined")
+ }
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
+ }
+ }
+ var err error
+ // Use our out buffer.
+ huff.MaxDecodedSize = litRegenSize
+ if fourStreams {
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+ } else {
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+ }
+ // Make sure we don't leak our literals buffer
+ if err != nil {
+ println("decompressing literals:", err)
+ return in, err
+ }
+ if len(literals) != litRegenSize {
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+
+ case literalsBlockCompressed:
+ if len(in) < litCompSize {
+ println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+ return in, ErrBlockTooSmall
+ }
+ literals = in[:litCompSize]
+ in = in[litCompSize:]
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
+ }
+ }
+ huff := hist.huffTree
+ if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+ huff = huffDecoderPool.Get().(*huff0.Scratch)
+ if huff == nil {
+ huff = &huff0.Scratch{}
+ }
+ }
+ var err error
+ if debugDecoder {
+ println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+ }
+ huff, literals, err = huff0.ReadTable(literals, huff)
+ if err != nil {
+ println("reading huffman table:", err)
+ return in, err
+ }
+ hist.huffTree = huff
+ huff.MaxDecodedSize = litRegenSize
+ // Use our out buffer.
+ if fourStreams {
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+ } else {
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+ }
+ if err != nil {
+ println("decoding compressed literals:", err)
+ return in, err
+ }
+ // Make sure we don't leak our literals buffer
+ if len(literals) != litRegenSize {
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+ // Re-cap to get extra size.
+ literals = b.literalBuf[:len(literals)]
+ if debugDecoder {
+ printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
+ }
+ }
+ hist.decoders.literals = literals
+ return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+ in := b.data
+ in, err := b.decodeLiterals(in, hist)
+ if err != nil {
+ return err
+ }
+ err = b.prepareSequences(in, hist)
+ if err != nil {
+ return err
+ }
+ if hist.decoders.nSeqs == 0 {
+ b.dst = append(b.dst, hist.decoders.literals...)
+ return nil
+ }
+ before := len(hist.decoders.out)
+ err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
+ if err != nil {
+ return err
+ }
+ if hist.decoders.maxSyncLen > 0 {
+ hist.decoders.maxSyncLen += uint64(before)
+ hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+ }
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
+ return nil
+}
+
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
+ // Decode Sequences
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
+ if len(in) < 1 {
+ return ErrBlockTooSmall
+ }
+ var nSeqs int
+ seqHeader := in[0]
+ switch {
+ case seqHeader < 128:
+ nSeqs = int(seqHeader)
+ in = in[1:]
+ case seqHeader < 255:
+ if len(in) < 2 {
+ return ErrBlockTooSmall
+ }
+ nSeqs = int(seqHeader-128)<<8 | int(in[1])
+ in = in[2:]
+ case seqHeader == 255:
+ if len(in) < 3 {
+ return ErrBlockTooSmall
+ }
+ nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
+ in = in[3:]
+ }
+ if nSeqs == 0 && len(in) != 0 {
+ // When no sequences, there should not be any more data...
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+ }
+ return ErrUnexpectedBlockSize
+ }
+
+ var seqs = &hist.decoders
+ seqs.nSeqs = nSeqs
+ if nSeqs > 0 {
+ if len(in) < 1 {
+ return ErrBlockTooSmall
+ }
+ br := byteReader{b: in, off: 0}
+ compMode := br.Uint8()
+ br.advance(1)
+ if debugDecoder {
+ printf("Compression modes: 0b%b", compMode)
+ }
+ if compMode&3 != 0 {
+ return errors.New("corrupt block: reserved bits not zero")
+ }
+ for i := uint(0); i < 3; i++ {
+ mode := seqCompMode((compMode >> (6 - i*2)) & 3)
+ if debugDecoder {
+ println("Table", tableIndex(i), "is", mode)
+ }
+ var seq *sequenceDec
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ seq = &seqs.litLengths
+ case tableOffsets:
+ seq = &seqs.offsets
+ case tableMatchLengths:
+ seq = &seqs.matchLengths
+ default:
+ panic("unknown table")
+ }
+ switch mode {
+ case compModePredefined:
+ if seq.fse != nil && !seq.fse.preDefined {
+ fseDecoderPool.Put(seq.fse)
+ }
+ seq.fse = &fsePredef[i]
+ case compModeRLE:
+ if br.remain() < 1 {
+ return ErrBlockTooSmall
+ }
+ v := br.Uint8()
+ br.advance(1)
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
+ symb, err := decSymbolValue(v, symbolTableX[i])
+ if err != nil {
+ printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
+ return err
+ }
+ seq.fse.setRLE(symb)
+ if debugDecoder {
+ printf("RLE set to 0x%x, code: %v", symb, v)
+ }
+ case compModeFSE:
+ println("Reading table for", tableIndex(i))
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
+ err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
+ if err != nil {
+ println("Read table error:", err)
+ return err
+ }
+ err = seq.fse.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debugDecoder {
+ println("Read table ok", "symbolLen:", seq.fse.symbolLen)
+ }
+ case compModeRepeat:
+ seq.repeat = true
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ }
+ in = br.unread()
+ }
+ if debugDecoder {
+ println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
+ }
+
+ if nSeqs == 0 {
+ if len(b.sequence) > 0 {
+ b.sequence = b.sequence[:0]
+ }
+ return nil
+ }
+ br := seqs.br
+ if br == nil {
+ br = &bitReader{}
+ }
+ if err := br.init(in); err != nil {
+ return err
+ }
+
+ if err := seqs.initialize(br, hist, b.dst); err != nil {
+ println("initializing sequences:", err)
+ return err
+ }
+ // Extract blocks...
+ if false && hist.dict == nil {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+ var buf bytes.Buffer
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+ buf.Write(in)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+ }
+
+ return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+ if cap(b.sequence) < hist.decoders.nSeqs {
+ if b.lowMem {
+ b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+ } else {
+ b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+ }
+ }
+ b.sequence = b.sequence[:hist.decoders.nSeqs]
+ if hist.decoders.nSeqs == 0 {
+ hist.decoders.seqSize = len(hist.decoders.literals)
+ return nil
+ }
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.prevOffset = hist.recentOffsets
+
+ err := hist.decoders.decode(b.sequence)
+ hist.recentOffsets = hist.decoders.prevOffset
+ return err
+}
+
+func (b *blockDec) executeSequences(hist *history) error {
+ hbytes := hist.b
+ if len(hbytes) > hist.windowSize {
+ hbytes = hbytes[len(hbytes)-hist.windowSize:]
+ // We do not need history anymore.
+ if hist.dict != nil {
+ hist.dict.content = nil
+ }
+ }
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.out = b.dst[:0]
+ err := hist.decoders.execute(b.sequence, hbytes)
+ if err != nil {
+ return err
+ }
+ return b.updateHistory(hist)
+}
+
+func (b *blockDec) updateHistory(hist *history) error {
+ if len(b.data) > maxCompressedBlockSize {
+ return fmt.Errorf("compressed block size too large (%d)", len(b.data))
+ }
+ // Set output and release references.
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
+
+ if b.Last {
+ // if last block we don't care about history.
+ println("Last block, no history returned")
+ hist.b = hist.b[:0]
+ return nil
+ } else {
+ hist.append(b.dst)
+ if debugDecoder {
+ println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+ }
+ }
+ hist.decoders.out, hist.decoders.literals = nil, nil
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
new file mode 100644
index 000000000..32a7f401d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -0,0 +1,909 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type blockEnc struct {
+ size int
+ literals []byte
+ sequences []seq
+ coders seqCoders
+ litEnc *huff0.Scratch
+ dictLitEnc *huff0.Scratch
+ wr bitWriter
+
+ extraLits int
+ output []byte
+ recentOffsets [3]uint32
+ prevRecentOffsets [3]uint32
+
+ last bool
+ lowMem bool
+}
+
+// init should be used once the block has been created.
+// If called more than once, the effect is the same as calling reset.
+func (b *blockEnc) init() {
+ if b.lowMem {
+ // 1K literals
+ if cap(b.literals) < 1<<10 {
+ b.literals = make([]byte, 0, 1<<10)
+ }
+ const defSeqs = 20
+ if cap(b.sequences) < defSeqs {
+ b.sequences = make([]seq, 0, defSeqs)
+ }
+ // 1K
+ if cap(b.output) < 1<<10 {
+ b.output = make([]byte, 0, 1<<10)
+ }
+ } else {
+ if cap(b.literals) < maxCompressedBlockSize {
+ b.literals = make([]byte, 0, maxCompressedBlockSize)
+ }
+ const defSeqs = 2000
+ if cap(b.sequences) < defSeqs {
+ b.sequences = make([]seq, 0, defSeqs)
+ }
+ if cap(b.output) < maxCompressedBlockSize {
+ b.output = make([]byte, 0, maxCompressedBlockSize)
+ }
+ }
+
+ if b.coders.mlEnc == nil {
+ b.coders.mlEnc = &fseEncoder{}
+ b.coders.mlPrev = &fseEncoder{}
+ b.coders.ofEnc = &fseEncoder{}
+ b.coders.ofPrev = &fseEncoder{}
+ b.coders.llEnc = &fseEncoder{}
+ b.coders.llPrev = &fseEncoder{}
+ }
+ b.litEnc = &huff0.Scratch{WantLogLess: 4}
+ b.reset(nil)
+}
+
+// initNewEncode can be used to reset offsets and encoders to the initial state.
+func (b *blockEnc) initNewEncode() {
+ b.recentOffsets = [3]uint32{1, 4, 8}
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ b.coders.setPrev(nil, nil, nil)
+}
+
+// reset will reset the block for a new encode, but in the same stream,
+// meaning that state will be carried over, but the block content is reset.
+// If a previous block is provided, the recent offsets are carried over.
+func (b *blockEnc) reset(prev *blockEnc) {
+ b.extraLits = 0
+ b.literals = b.literals[:0]
+ b.size = 0
+ b.sequences = b.sequences[:0]
+ b.output = b.output[:0]
+ b.last = false
+ if prev != nil {
+ b.recentOffsets = prev.prevRecentOffsets
+ }
+ b.dictLitEnc = nil
+}
+
+// swapEncoders swaps the FSE sequence encoders and the Huffman literal encoder
+// with those of the previous block, so their state can be reused.
+func (b *blockEnc) swapEncoders(prev *blockEnc) {
+ b.coders.swap(&prev.coders)
+ b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
+}
+
+// blockHeader contains the information for a block header.
+type blockHeader uint32
+
+// setLast sets the 'last' indicator on a block.
+func (h *blockHeader) setLast(b bool) {
+ if b {
+ *h = *h | 1
+ } else {
+ const mask = (1 << 24) - 2
+ *h = *h & mask
+ }
+}
+
+// setSize will store the compressed size of a block.
+func (h *blockHeader) setSize(v uint32) {
+ const mask = 7
+ *h = (*h)&mask | blockHeader(v<<3)
+}
+
+// setType sets the block type.
+func (h *blockHeader) setType(t blockType) {
+ const mask = 1 | (((1 << 24) - 1) ^ 7)
+ *h = (*h & mask) | blockHeader(t<<1)
+}
+
+// appendTo will append the block header to a slice.
+func (h blockHeader) appendTo(b []byte) []byte {
+ return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+}
+
+// String returns a string representation of the block.
+func (h blockHeader) String() string {
+ return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
+}
+
+// literalsHeader contains literals header information.
+type literalsHeader uint64
+
+// setType can be used to set the type of literal block.
+func (h *literalsHeader) setType(t literalsBlockType) {
+ const mask = math.MaxUint64 - 3
+ *h = (*h & mask) | literalsHeader(t)
+}
+
+// setSize can be used to set a single size, for uncompressed and RLE content.
+func (h *literalsHeader) setSize(regenLen int) {
+ inBits := bits.Len32(uint32(regenLen))
+ // Only retain 2 bits
+ const mask = 3
+ lh := uint64(*h & mask)
+ switch {
+ case inBits < 5:
+ lh |= (uint64(regenLen) << 3) | (1 << 60)
+ if debugEncoder {
+ got := int(lh>>3) & 0xff
+ if got != regenLen {
+ panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
+ }
+ }
+ case inBits < 12:
+ lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
+ case inBits < 20:
+ lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
+ default:
+ panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
+ }
+ *h = literalsHeader(lh)
+}
+
+// setSizes will set the size of a compressed literals section and the input length.
+func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
+ compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
+ // Only retain 2 bits
+ const mask = 3
+ lh := uint64(*h & mask)
+ switch {
+ case compBits <= 10 && inBits <= 10:
+ if !single {
+ lh |= 1 << 2
+ }
+ lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
+ if debugEncoder {
+ const mmask = (1 << 24) - 1
+ n := (lh >> 4) & mmask
+ if int(n&1023) != inLen {
+ panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
+ }
+ if int(n>>10) != compLen {
+ panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
+ }
+ }
+ case compBits <= 14 && inBits <= 14:
+ lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
+ if single {
+ panic("single stream used with more than 10 bits length.")
+ }
+ case compBits <= 18 && inBits <= 18:
+ lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
+ if single {
+ panic("single stream used with more than 10 bits length.")
+ }
+ default:
+ panic("internal error: block too big")
+ }
+ *h = literalsHeader(lh)
+}
+
+// appendTo will append the literals header to a byte slice.
+func (h literalsHeader) appendTo(b []byte) []byte {
+ size := uint8(h >> 60)
+ switch size {
+ case 1:
+ b = append(b, uint8(h))
+ case 2:
+ b = append(b, uint8(h), uint8(h>>8))
+ case 3:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+ case 4:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+ case 5:
+ b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+ default:
+ panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+ }
+ return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+ return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+ return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+ b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+ b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+ // Check if offset is one of the recent offsets.
+ // Adjusts the output offset accordingly.
+ // Gives a tiny bit of compression, typically around 1%.
+ if true {
+ if lits > 0 {
+ switch offset {
+ case b.recentOffsets[0]:
+ offset = 1
+ case b.recentOffsets[1]:
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 2
+ case b.recentOffsets[2]:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 3
+ default:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset += 3
+ }
+ } else {
+ switch offset {
+ case b.recentOffsets[1]:
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 1
+ case b.recentOffsets[2]:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 2
+ case b.recentOffsets[0] - 1:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset = 3
+ default:
+ b.recentOffsets[2] = b.recentOffsets[1]
+ b.recentOffsets[1] = b.recentOffsets[0]
+ b.recentOffsets[0] = offset
+ offset += 3
+ }
+ }
+ } else {
+ offset += 3
+ }
+ return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(a)))
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output[:0])
+ b.output = append(b.output, a...)
+ if debugEncoder {
+ println("Adding RAW block, length", len(a), "last:", b.last)
+ }
+}
+
+// encodeRawTo will append a raw block, including its block header, containing src to dst and return the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(src)))
+ bh.setType(blockTypeRaw)
+ dst = bh.appendTo(dst)
+ dst = append(dst, src...)
+ if debugEncoder {
+ println("Adding RAW block, length", len(src), "last:", b.last)
+ }
+ return dst
+}
+
+// encodeLits can be used if the block only contains literals (no sequences).
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(uint32(len(lits)))
+
+ // Don't compress extremely small blocks
+ if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
+ if debugEncoder {
+ println("Adding RAW block, length", len(lits), "last:", b.last)
+ }
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, lits...)
+ return nil
+ }
+
+ var (
+ out []byte
+ reUsed, single bool
+ err error
+ )
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
+ if len(lits) >= 1024 {
+ // Use 4 Streams.
+ out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
+ } else if len(lits) > 16 {
+ // Use 1 stream
+ single = true
+ out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
+ } else {
+ err = huff0.ErrIncompressible
+ }
+ if err == nil && len(out)+5 > len(lits) {
+ // If we are close, we may still be worse or equal to raw.
+ var lh literalsHeader
+ lh.setSizes(len(out), len(lits), single)
+ if len(out)+lh.size() >= len(lits) {
+ err = huff0.ErrIncompressible
+ }
+ }
+ switch err {
+ case huff0.ErrIncompressible:
+ if debugEncoder {
+ println("Adding RAW block, length", len(lits), "last:", b.last)
+ }
+ bh.setType(blockTypeRaw)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, lits...)
+ return nil
+ case huff0.ErrUseRLE:
+ if debugEncoder {
+ println("Adding RLE block, length", len(lits))
+ }
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, lits[0])
+ return nil
+ case nil:
+ default:
+ return err
+ }
+ // Compressed...
+ // Now, allow reuse
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ bh.setType(blockTypeCompressed)
+ var lh literalsHeader
+ if reUsed {
+ if debugEncoder {
+ println("Reused tree, compressed to", len(out))
+ }
+ lh.setType(literalsBlockTreeless)
+ } else {
+ if debugEncoder {
+ println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
+ }
+ lh.setType(literalsBlockCompressed)
+ }
+ // Set sizes
+ lh.setSizes(len(out), len(lits), single)
+ bh.setSize(uint32(len(out) + lh.size() + 1))
+
+ // Write block headers.
+ b.output = bh.appendTo(b.output)
+ b.output = lh.appendTo(b.output)
+ // Add compressed data.
+ b.output = append(b.output, out...)
+ // No sequences.
+ b.output = append(b.output, 0)
+ return nil
+}
+
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(length)
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, val)
+}
+
+// fuzzFseEncoder can be used to fuzz the FSE encoder.
+func fuzzFseEncoder(data []byte) int {
+ if len(data) > maxSequences || len(data) < 2 {
+ return 0
+ }
+ enc := fseEncoder{}
+ hist := enc.Histogram()
+ maxSym := uint8(0)
+ for i, v := range data {
+ v = v & 63
+ data[i] = v
+ hist[v]++
+ if v > maxSym {
+ maxSym = v
+ }
+ }
+ if maxSym == 0 {
+ // All 0
+ return 0
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ cnt := maxCount(hist[:maxSym])
+ if cnt == len(data) {
+ // RLE
+ return 0
+ }
+ enc.HistogramFinished(maxSym, cnt)
+ err := enc.normalizeCount(len(data))
+ if err != nil {
+ return 0
+ }
+ _, err = enc.writeCount(nil)
+ if err != nil {
+ panic(err)
+ }
+ return 1
+}
+
+// encode will encode the block and append the output in b.output.
+// Previous offset codes must be pushed if more blocks are expected.
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
+ if len(b.sequences) == 0 {
+ return b.encodeLits(b.literals, rawAllLits)
+ }
+ if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+ // Check common RLE cases.
+ seq := b.sequences[0]
+ if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+ // Offset == 1 and 0 or 1 literals.
+ b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+ return nil
+ }
+ }
+
+ // We want some difference to at least account for the headers.
+ saved := b.size - len(b.literals) - (b.size >> 6)
+ if saved < 16 {
+ if org == nil {
+ return errIncompressible
+ }
+ b.popOffsets()
+ return b.encodeLits(org, rawAllLits)
+ }
+
+ var bh blockHeader
+ var lh literalsHeader
+ bh.setLast(b.last)
+ bh.setType(blockTypeCompressed)
+ // Store offset of the block header. Needed when we know the size.
+ bhOffset := len(b.output)
+ b.output = bh.appendTo(b.output)
+
+ var (
+ out []byte
+ reUsed, single bool
+ err error
+ )
+ if b.dictLitEnc != nil {
+ b.litEnc.TransferCTable(b.dictLitEnc)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ b.dictLitEnc = nil
+ }
+ if len(b.literals) >= 1024 && !raw {
+ // Use 4 Streams.
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
+ } else if len(b.literals) > 16 && !raw {
+ // Use 1 stream
+ single = true
+ out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
+ } else {
+ err = huff0.ErrIncompressible
+ }
+
+ if err == nil && len(out)+5 > len(b.literals) {
+ // If we are close, we may still be worse or equal to raw.
+ var lh literalsHeader
+ lh.setSize(len(b.literals))
+ szRaw := lh.size()
+ lh.setSizes(len(out), len(b.literals), single)
+ szComp := lh.size()
+ if len(out)+szComp >= len(b.literals)+szRaw {
+ err = huff0.ErrIncompressible
+ }
+ }
+ switch err {
+ case huff0.ErrIncompressible:
+ lh.setType(literalsBlockRaw)
+ lh.setSize(len(b.literals))
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, b.literals...)
+ if debugEncoder {
+ println("Adding literals RAW, length", len(b.literals))
+ }
+ case huff0.ErrUseRLE:
+ lh.setType(literalsBlockRLE)
+ lh.setSize(len(b.literals))
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, b.literals[0])
+ if debugEncoder {
+ println("Adding literals RLE")
+ }
+ case nil:
+ // Compressed litLen...
+ if reUsed {
+ if debugEncoder {
+ println("reused tree")
+ }
+ lh.setType(literalsBlockTreeless)
+ } else {
+ if debugEncoder {
+ println("new tree, size:", len(b.litEnc.OutTable))
+ }
+ lh.setType(literalsBlockCompressed)
+ if debugEncoder {
+ _, _, err := huff0.ReadTable(out, nil)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+ lh.setSizes(len(out), len(b.literals), single)
+ if debugEncoder {
+ printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
+ println("Adding literal header:", lh)
+ }
+ b.output = lh.appendTo(b.output)
+ b.output = append(b.output, out...)
+ b.litEnc.Reuse = huff0.ReusePolicyAllow
+ if debugEncoder {
+ println("Adding literals compressed")
+ }
+ default:
+ if debugEncoder {
+ println("Adding literals ERROR:", err)
+ }
+ return err
+ }
+ // Sequence compression
+
+ // Write the number of sequences
+ switch {
+ case len(b.sequences) < 128:
+ b.output = append(b.output, uint8(len(b.sequences)))
+ case len(b.sequences) < 0x7f00: // TODO: this could be wrong
+ n := len(b.sequences)
+ b.output = append(b.output, 128+uint8(n>>8), uint8(n))
+ default:
+ n := len(b.sequences) - 0x7f00
+ b.output = append(b.output, 255, uint8(n), uint8(n>>8))
+ }
+ if debugEncoder {
+ println("Encoding", len(b.sequences), "sequences")
+ }
+ b.genCodes()
+ llEnc := b.coders.llEnc
+ ofEnc := b.coders.ofEnc
+ mlEnc := b.coders.mlEnc
+ err = llEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+ err = ofEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+ err = mlEnc.normalizeCount(len(b.sequences))
+ if err != nil {
+ return err
+ }
+
+ // Choose the best compression mode for each type.
+ // Will evaluate the new vs predefined and previous.
+ chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
+ // See if predefined/previous is better
+ hist := cur.count[:cur.symbolLen]
+ nSize := cur.approxSize(hist) + cur.maxHeaderSize()
+ predefSize := preDef.approxSize(hist)
+ prevSize := prev.approxSize(hist)
+
+ // Add a small penalty for new encoders.
+ // Don't bother with extremely small (<2 byte gains).
+ nSize = nSize + (nSize+2*8*16)>>4
+ switch {
+ case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
+ if debugEncoder {
+ println("Using predefined", predefSize>>3, "<=", nSize>>3)
+ }
+ return preDef, compModePredefined
+ case prevSize <= nSize:
+ if debugEncoder {
+ println("Using previous", prevSize>>3, "<=", nSize>>3)
+ }
+ return prev, compModeRepeat
+ default:
+ if debugEncoder {
+ println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
+ println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
+ }
+ return cur, compModeFSE
+ }
+ }
+
+ // Write compression mode
+ var mode uint8
+ if llEnc.useRLE {
+ mode |= uint8(compModeRLE) << 6
+ llEnc.setRLE(b.sequences[0].llCode)
+ if debugEncoder {
+ println("llEnc.useRLE")
+ }
+ } else {
+ var m seqCompMode
+ llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
+ mode |= uint8(m) << 6
+ }
+ if ofEnc.useRLE {
+ mode |= uint8(compModeRLE) << 4
+ ofEnc.setRLE(b.sequences[0].ofCode)
+ if debugEncoder {
+ println("ofEnc.useRLE")
+ }
+ } else {
+ var m seqCompMode
+ ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
+ mode |= uint8(m) << 4
+ }
+
+ if mlEnc.useRLE {
+ mode |= uint8(compModeRLE) << 2
+ mlEnc.setRLE(b.sequences[0].mlCode)
+ if debugEncoder {
+ println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
+ }
+ } else {
+ var m seqCompMode
+ mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
+ mode |= uint8(m) << 2
+ }
+ b.output = append(b.output, mode)
+ if debugEncoder {
+ printf("Compression modes: 0b%b", mode)
+ }
+ b.output, err = llEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+ start := len(b.output)
+ b.output, err = ofEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+ if false {
+ println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
+ for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
+ }
+ }
+ b.output, err = mlEnc.writeCount(b.output)
+ if err != nil {
+ return err
+ }
+
+ // Maybe in block?
+ wr := &b.wr
+ wr.reset(b.output)
+
+ var ll, of, ml cState
+
+ // Current sequence
+ seq := len(b.sequences) - 1
+ s := b.sequences[seq]
+ llEnc.setBits(llBitsTable[:])
+ mlEnc.setBits(mlBitsTable[:])
+ ofEnc.setBits(nil)
+
+ llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]
+
+ // We have 3 bounds checks here (and in the loop).
+ // Since we are iterating backwards it is kinda hard to avoid.
+ llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+ ll.init(wr, &llEnc.ct, llB)
+ of.init(wr, &ofEnc.ct, ofB)
+ wr.flush32()
+ ml.init(wr, &mlEnc.ct, mlB)
+
+ // Each of these lookups also generates a bounds check.
+ wr.addBits32NC(s.litLen, llB.outBits)
+ wr.addBits32NC(s.matchLen, mlB.outBits)
+ wr.flush32()
+ wr.addBits32NC(s.offset, ofB.outBits)
+ if debugSequences {
+ println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+ }
+ seq--
+ // Store sequences in reverse...
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ // Write extra bits.
+ wr.addBits64NC(extraBits, extraBitsN)
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ wr.close()
+ b.output = wr.out
+
+ // Maybe even add a bigger margin.
+ if len(b.output)-3-bhOffset >= b.size {
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return nil
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 000000000..01a01e486
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[blockTypeRaw-0]
+ _ = x[blockTypeRLE-1]
+ _ = x[blockTypeCompressed-2]
+ _ = x[blockTypeReserved-3]
+}
+
+const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
+
+var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
+
+func (i blockType) String() string {
+ if i >= blockType(len(_blockType_index)-1) {
+ return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[literalsBlockRaw-0]
+ _ = x[literalsBlockRLE-1]
+ _ = x[literalsBlockCompressed-2]
+ _ = x[literalsBlockTreeless-3]
+}
+
+const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
+
+var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
+
+func (i literalsBlockType) String() string {
+ if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
+ return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[compModePredefined-0]
+ _ = x[compModeRLE-1]
+ _ = x[compModeFSE-2]
+ _ = x[compModeRepeat-3]
+}
+
+const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
+
+var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
+
+func (i seqCompMode) String() string {
+ if i >= seqCompMode(len(_seqCompMode_index)-1) {
+ return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[tableLiteralLengths-0]
+ _ = x[tableOffsets-1]
+ _ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+ if i >= tableIndex(len(_tableIndex_index)-1) {
+ return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 000000000..55a388553
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,131 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+type byteBuffer interface {
+ // Read up to 8 bytes.
+ // Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+ readSmall(n int) ([]byte, error)
+
+ // Read >8 bytes.
+ // MAY use the destination slice.
+ readBig(n int, dst []byte) ([]byte, error)
+
+ // Read a single byte.
+ readByte() (byte, error)
+
+ // Skip n bytes.
+ skipN(n int64) error
+}
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
+ if debugAsserts && n > 8 {
+ panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+ }
+ bb := *b
+ if len(bb) < n {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := bb[:n]
+ *b = bb[n:]
+ return r, nil
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+ bb := *b
+ if len(bb) < n {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := bb[:n]
+ *b = bb[n:]
+ return r, nil
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+ bb := *b
+ if len(bb) < 1 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ r := bb[0]
+ *b = bb[1:]
+ return r, nil
+}
+
+func (b *byteBuf) skipN(n int64) error {
+ bb := *b
+ if n < 0 {
+ return fmt.Errorf("negative skip (%d) requested", n)
+ }
+ if int64(len(bb)) < n {
+ return io.ErrUnexpectedEOF
+ }
+ *b = bb[n:]
+ return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+ r io.Reader
+ tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) ([]byte, error) {
+ if debugAsserts && n > 8 {
+ panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+ }
+ n2, err := io.ReadFull(r.r, r.tmp[:n])
+ // We only really care about the actual bytes read.
+ if err != nil {
+ if err == io.EOF {
+ return nil, io.ErrUnexpectedEOF
+ }
+ if debugDecoder {
+ println("readSmall: got", n2, "want", n, "err", err)
+ }
+ return nil, err
+ }
+ return r.tmp[:n], nil
+}
+
+func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
+ if cap(dst) < n {
+ dst = make([]byte, n)
+ }
+ n2, err := io.ReadFull(r.r, dst[:n])
+ if err == io.EOF && n > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ return dst[:n2], err
+}
+
+func (r *readerWrapper) readByte() (byte, error) {
+ n2, err := io.ReadFull(r.r, r.tmp[:1])
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return 0, err
+ }
+ if n2 != 1 {
+ return 0, io.ErrUnexpectedEOF
+ }
+ return r.tmp[0], nil
+}
+
+func (r *readerWrapper) skipN(n int64) error {
+ n2, err := io.CopyN(io.Discard, r.r, n)
+ if n2 != n {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
new file mode 100644
index 000000000..0e59a242d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -0,0 +1,82 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+ b []byte
+ off int
+}
+
+// advance the stream b by n bytes.
+func (b *byteReader) advance(n uint) {
+ b.off += int(n)
+}
+
+// overread returns whether we have advanced too far.
+func (b *byteReader) overread() bool {
+ return b.off > len(b.b)
+}
+
+// Int32 returns a little endian int32 starting at current offset.
+func (b byteReader) Int32() int32 {
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := int32(b2[3])
+ v2 := int32(b2[2])
+ v1 := int32(b2[1])
+ v0 := int32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint8 returns the next byte
+func (b *byteReader) Uint8() uint8 {
+ v := b.b[b.off]
+ return v
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+ if r := b.remain(); r < 4 {
+ // Very rare
+ v := uint32(0)
+ for i := 1; i <= r; i++ {
+ v = (v << 8) | uint32(b.b[len(b.b)-i])
+ }
+ return v
+ }
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// Uint32NC returns a little endian uint32 starting at current offset.
+// The caller must ensure that there are at least 4 bytes left.
+func (b byteReader) Uint32NC() uint32 {
+ b2 := b.b[b.off:]
+ b2 = b2[:4]
+ v3 := uint32(b2[3])
+ v2 := uint32(b2[2])
+ v1 := uint32(b2[1])
+ v0 := uint32(b2[0])
+ return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+ return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+ return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
new file mode 100644
index 000000000..6a5a2988b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -0,0 +1,261 @@
+// Copyright 2020+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// HeaderMaxSize is the maximum size of a Frame and Block Header.
+// If less is sent to Header.Decode it *may* still contain enough information.
+const HeaderMaxSize = 14 + 3
+
+// Header contains information about the first frame and block within that.
+type Header struct {
+ // SingleSegment specifies whether the data is to be decompressed into a
+ // single contiguous memory segment.
+ // It implies that WindowSize is invalid and that FrameContentSize is valid.
+ SingleSegment bool
+
+ // WindowSize is the window of data to keep while decoding.
+ // Will only be set if SingleSegment is false.
+ WindowSize uint64
+
+ // Dictionary ID.
+ // If 0, no dictionary.
+ DictionaryID uint32
+
+ // HasFCS specifies whether FrameContentSize has a valid value.
+ HasFCS bool
+
+ // FrameContentSize is the expected uncompressed size of the entire frame.
+ FrameContentSize uint64
+
+ // Skippable will be true if the frame is meant to be skipped.
+ // This implies that FirstBlock.OK is false.
+ Skippable bool
+
+ // SkippableID is the user-specific ID for the skippable frame.
+ // Valid values are between 0 to 15, inclusive.
+ SkippableID int
+
+ // SkippableSize is the length of the user data to skip following
+ // the header.
+ SkippableSize uint32
+
+ // HeaderSize is the raw size of the frame header.
+ //
+ // For normal frames, it includes the size of the magic number and
+ // the size of the header (per section 3.1.1.1).
+ // It does not include the size for any data blocks (section 3.1.1.2) nor
+ // the size for the trailing content checksum.
+ //
+ // For skippable frames, this counts the size of the magic number
+ // along with the size of the size field of the payload.
+ // It does not include the size of the skippable payload itself.
+ // The total frame size is the HeaderSize plus the SkippableSize.
+ HeaderSize int
+
+ // First block information.
+ FirstBlock struct {
+ // OK will be set if first block could be decoded.
+ OK bool
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Is the data compressed?
+ // If true CompressedSize will be populated.
+ // Unfortunately DecompressedSize cannot be determined
+ // without decoding the blocks.
+ Compressed bool
+
+ // DecompressedSize is the expected decompressed size of the block.
+ // Will be 0 if it cannot be determined.
+ DecompressedSize int
+
+ // CompressedSize of the data in the block.
+ // Does not include the block header.
+ // Will be equal to DecompressedSize if not Compressed.
+ CompressedSize int
+ }
+
+ // If set there is a checksum present for the block content.
+ // The checksum field at the end is always 4 bytes long.
+ HasCheckSum bool
+}
+
+// Decode the header from the beginning of the stream.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) Decode(in []byte) error {
+ _, err := h.DecodeAndStrip(in)
+ return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
+ *h = Header{}
+ if len(in) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ h.HeaderSize += 4
+ b, in := in[:4], in[4:]
+ if string(b) != frameMagic {
+ if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
+ return nil, ErrMagicMismatch
+ }
+ if len(in) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ h.HeaderSize += 4
+ h.Skippable = true
+ h.SkippableID = int(b[0] & 0xf)
+ h.SkippableSize = binary.LittleEndian.Uint32(in)
+ return in[4:], nil
+ }
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+ if len(in) < 1 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ fhd, in := in[0], in[1:]
+ h.HeaderSize++
+ h.SingleSegment = fhd&(1<<5) != 0
+ h.HasCheckSum = fhd&(1<<2) != 0
+ if fhd&(1<<3) != 0 {
+ return nil, errors.New("reserved bit set on frame header")
+ }
+
+ if !h.SingleSegment {
+ if len(in) < 1 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ var wd byte
+ wd, in = in[0], in[1:]
+ h.HeaderSize++
+ windowLog := 10 + (wd >> 3)
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * uint64(wd&0x7)
+ h.WindowSize = windowBase + windowAdd
+ }
+
+ // Read Dictionary_ID
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+ if size := fhd & 3; size != 0 {
+ if size == 3 {
+ size = 4
+ }
+ if len(in) < int(size) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b, in = in[:size], in[size:]
+ h.HeaderSize += int(size)
+ switch len(b) {
+ case 1:
+ h.DictionaryID = uint32(b[0])
+ case 2:
+ h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
+ case 4:
+ h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ }
+ }
+
+ // Read Frame_Content_Size
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+ var fcsSize int
+ v := fhd >> 6
+ switch v {
+ case 0:
+ if h.SingleSegment {
+ fcsSize = 1
+ }
+ default:
+ fcsSize = 1 << v
+ }
+
+ if fcsSize > 0 {
+ h.HasFCS = true
+ if len(in) < fcsSize {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b, in = in[:fcsSize], in[fcsSize:]
+ h.HeaderSize += int(fcsSize)
+ switch len(b) {
+ case 1:
+ h.FrameContentSize = uint64(b[0])
+ case 2:
+ // When FCS_Field_Size is 2, the offset of 256 is added.
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+ case 4:
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+ case 8:
+ d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+ h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+ }
+ }
+
+ // Frame Header done, we will not fail from now on.
+ if len(in) < 3 {
+ return in, nil
+ }
+ tmp := in[:3]
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ h.FirstBlock.Last = bh&1 != 0
+ blockType := blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ switch blockType {
+ case blockTypeReserved:
+ return in, nil
+ case blockTypeRLE:
+ h.FirstBlock.Compressed = true
+ h.FirstBlock.DecompressedSize = cSize
+ h.FirstBlock.CompressedSize = 1
+ case blockTypeCompressed:
+ h.FirstBlock.Compressed = true
+ h.FirstBlock.CompressedSize = cSize
+ case blockTypeRaw:
+ h.FirstBlock.DecompressedSize = cSize
+ h.FirstBlock.CompressedSize = cSize
+ default:
+ panic("Invalid block type")
+ }
+
+ h.FirstBlock.OK = true
+ return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+ if h.Skippable {
+ magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+ magic[0] |= byte(h.SkippableID & 0xf)
+ dst = append(dst, magic[:]...)
+ f := h.SkippableSize
+ return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+ }
+ f := frameHeader{
+ ContentSize: h.FrameContentSize,
+ WindowSize: uint32(h.WindowSize),
+ SingleSegment: h.SingleSegment,
+ Checksum: h.HasCheckSum,
+ DictID: h.DictionaryID,
+ }
+ return f.appendTo(dst), nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 000000000..bbca17234
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,948 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "context"
+ "encoding/binary"
+ "io"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+ o decoderOptions
+
+ // Unreferenced decoders, ready for use.
+ decoders chan *blockDec
+
+ // Current read position used for Reader functionality.
+ current decoderState
+
+ // sync stream decoding
+ syncStream struct {
+ decodedFrame uint64
+ br readerWrapper
+ enabled bool
+ inFrame bool
+ dstBuf []byte
+ }
+
+ frame *frameDec
+
+ // Custom dictionaries.
+ dicts map[uint32]*dict
+
+ // streamWg is the waitgroup for all streams
+ streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+ // current block being written to stream.
+ decodeOutput
+
+ // output in order to be written to stream.
+ output chan decodeOutput
+
+ // cancel remaining output.
+ cancel context.CancelFunc
+
+ // crc of current frame
+ crc *xxhash.Digest
+
+ flushed bool
+}
+
+var (
+ // Check the interfaces we want to support.
+ _ = io.WriterTo(&Decoder{})
+ _ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+ initPredefined()
+ var d Decoder
+ d.o.setDefault()
+ for _, o := range opts {
+ err := o(&d.o)
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.current.crc = xxhash.New()
+ d.current.flushed = true
+
+ if r == nil {
+ d.current.err = ErrDecoderNilInput
+ }
+
+ // Transfer option dicts.
+ d.dicts = make(map[uint32]*dict, len(d.o.dicts))
+ for _, dc := range d.o.dicts {
+ d.dicts[dc.id] = dc
+ }
+ d.o.dicts = nil
+
+ // Create decoders
+ d.decoders = make(chan *blockDec, d.o.concurrent)
+ for i := 0; i < d.o.concurrent; i++ {
+ dec := newBlockDec(d.o.lowMem)
+ dec.localFrame = newFrameDec(d.o)
+ d.decoders <- dec
+ }
+
+ if r == nil {
+ return &d, nil
+ }
+ return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+ var n int
+ for {
+ if len(d.current.b) > 0 {
+ filled := copy(p, d.current.b)
+ p = p[filled:]
+ d.current.b = d.current.b[filled:]
+ n += filled
+ }
+ if len(p) == 0 {
+ break
+ }
+ if len(d.current.b) == 0 {
+ // We have an error and no more data
+ if d.current.err != nil {
+ break
+ }
+ if !d.nextBlock(n == 0) {
+ return n, d.current.err
+ }
+ }
+ }
+ if len(d.current.b) > 0 {
+ if debugDecoder {
+ println("returning", n, "still bytes left:", len(d.current.b))
+ }
+ // Only return error at end of block
+ return n, nil
+ }
+ if d.current.err != nil {
+ d.drainOutput()
+ }
+ if debugDecoder {
+ println("returning", n, d.current.err, len(d.decoders))
+ }
+ return n, d.current.err
+}
+
+// Reset will reset the decoder with the supplied stream after the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+ if d.current.err == ErrDecoderClosed {
+ return d.current.err
+ }
+
+ d.drainOutput()
+
+ d.syncStream.br.r = nil
+ if r == nil {
+ d.current.err = ErrDecoderNilInput
+ if len(d.current.b) > 0 {
+ d.current.b = d.current.b[:0]
+ }
+ d.current.flushed = true
+ return nil
+ }
+
+ // If the input is a bytes buffer below the decodeBufsBelow limit, do sync decoding anyway.
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
+ bb2 := bb
+ if debugDecoder {
+ println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+ }
+ b := bb2.Bytes()
+ var dst []byte
+ if cap(d.syncStream.dstBuf) > 0 {
+ dst = d.syncStream.dstBuf[:0]
+ }
+
+ dst, err := d.DecodeAll(b, dst)
+ if err == nil {
+ err = io.EOF
+ }
+ // Save output buffer
+ d.syncStream.dstBuf = dst
+ d.current.b = dst
+ d.current.err = err
+ d.current.flushed = true
+ if debugDecoder {
+ println("sync decode to", len(dst), "bytes, err:", err)
+ }
+ return nil
+ }
+ // Remove current block.
+ d.stashDecoder()
+ d.current.decodeOutput = decodeOutput{}
+ d.current.err = nil
+ d.current.flushed = false
+ d.current.d = nil
+ d.syncStream.dstBuf = nil
+
+ // Ensure no-one else is still running...
+ d.streamWg.Wait()
+ if d.frame == nil {
+ d.frame = newFrameDec(d.o)
+ }
+
+ if d.o.concurrent == 1 {
+ return d.startSyncDecoder(r)
+ }
+
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ ctx, cancel := context.WithCancel(context.Background())
+ d.current.cancel = cancel
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(ctx, r, d.current.output)
+
+ return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
+func (d *Decoder) drainOutput() {
+ if d.current.cancel != nil {
+ if debugDecoder {
+ println("cancelling current")
+ }
+ d.current.cancel()
+ d.current.cancel = nil
+ }
+ if d.current.d != nil {
+ if debugDecoder {
+ printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ d.current.b = nil
+ }
+ if d.current.output == nil || d.current.flushed {
+ println("current already flushed")
+ return
+ }
+ for v := range d.current.output {
+ if v.d != nil {
+ if debugDecoder {
+ printf("re-adding decoder %p", v.d)
+ }
+ d.decoders <- v.d
+ }
+ }
+ d.current.output = nil
+ d.current.flushed = true
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+ var n int64
+ for {
+ if len(d.current.b) > 0 {
+ n2, err2 := w.Write(d.current.b)
+ n += int64(n2)
+ if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
+ d.current.err = err2
+ } else if n2 != len(d.current.b) {
+ d.current.err = io.ErrShortWrite
+ }
+ }
+ if d.current.err != nil {
+ break
+ }
+ d.nextBlock(true)
+ }
+ err := d.current.err
+ if err != nil {
+ d.drainOutput()
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+ if d.decoders == nil {
+ return dst, ErrDecoderClosed
+ }
+
+ // Grab a block decoder and frame decoder.
+ block := <-d.decoders
+ frame := block.localFrame
+ initialSize := len(dst)
+ defer func() {
+ if debugDecoder {
+ printf("re-adding decoder: %p", block)
+ }
+ frame.rawInput = nil
+ frame.bBuf = nil
+ if frame.history.decoders.br != nil {
+ frame.history.decoders.br.in = nil
+ }
+ d.decoders <- block
+ }()
+ frame.bBuf = input
+
+ for {
+ frame.history.reset()
+ err := frame.reset(&frame.bBuf)
+ if err != nil {
+ if err == io.EOF {
+ if debugDecoder {
+ println("frame reset return EOF")
+ }
+ return dst, nil
+ }
+ return dst, err
+ }
+ if err = d.setDict(frame); err != nil {
+ return nil, err
+ }
+ if frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
+ }
+ return dst, ErrWindowSizeExceeded
+ }
+ if frame.FrameContentSize != fcsUnknown {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if cap(dst)-len(dst) < int(frame.FrameContentSize) {
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
+
+ if cap(dst) == 0 && !d.o.limitToCap {
+ // Allocate len(input) * 2 by default if nothing is provided
+ // and we didn't get frame content size.
+ size := len(input) * 2
+ // Cap to 1 MB.
+ if size > 1<<20 {
+ size = 1 << 20
+ }
+ if uint64(size) > d.o.maxDecodedSize {
+ size = int(d.o.maxDecodedSize)
+ }
+ dst = make([]byte, 0, size)
+ }
+
+ dst, err = frame.runDecoder(dst, block)
+ if err != nil {
+ return dst, err
+ }
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
+ if len(frame.bBuf) == 0 {
+ if debugDecoder {
+ println("frame dbuf empty")
+ }
+ break
+ }
+ }
+ return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+ if d.current.err != nil {
+ // Keep error state.
+ return false
+ }
+ d.current.b = d.current.b[:0]
+
+ // SYNC:
+ if d.syncStream.enabled {
+ if !blocking {
+ return false
+ }
+ ok = d.nextBlockSync()
+ if !ok {
+ d.stashDecoder()
+ }
+ return ok
+ }
+
+ // ASYNC:
+ d.stashDecoder()
+ if blocking {
+ d.current.decodeOutput, ok = <-d.current.output
+ } else {
+ select {
+ case d.current.decodeOutput, ok = <-d.current.output:
+ default:
+ return false
+ }
+ }
+ if !ok {
+ // This should not happen, so signal error state...
+ d.current.err = io.ErrUnexpectedEOF
+ return false
+ }
+ next := d.current.decodeOutput
+ if next.d != nil && next.d.async.newHist != nil {
+ d.current.crc.Reset()
+ }
+ if debugDecoder {
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+ println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
+ }
+
+ if d.o.ignoreChecksum {
+ return true
+ }
+
+ if len(next.b) > 0 {
+ d.current.crc.Write(next.b)
+ }
+ if next.err == nil && next.d != nil && next.d.hasCRC {
+ got := uint32(d.current.crc.Sum64())
+ if got != next.d.checkCRC {
+ if debugDecoder {
+ printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
+ }
+ d.current.err = ErrCRCMismatch
+ } else {
+ if debugDecoder {
+ printf("CRC ok %08x\n", got)
+ }
+ }
+ }
+
+ return true
+}
+
+func (d *Decoder) nextBlockSync() (ok bool) {
+ if d.current.d == nil {
+ d.current.d = <-d.decoders
+ }
+ for len(d.current.b) == 0 {
+ if !d.syncStream.inFrame {
+ d.frame.history.reset()
+ d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err == nil {
+ d.current.err = d.setDict(d.frame)
+ }
+ if d.current.err != nil {
+ return false
+ }
+ if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+ d.current.err = ErrDecoderSizeExceeded
+ return false
+ }
+
+ d.syncStream.decodedFrame = 0
+ d.syncStream.inFrame = true
+ }
+ d.current.err = d.frame.next(d.current.d)
+ if d.current.err != nil {
+ return false
+ }
+ d.frame.history.ensureBlock()
+ if debugDecoder {
+ println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+ }
+ histBefore := len(d.frame.history.b)
+ d.current.err = d.current.d.decodeBuf(&d.frame.history)
+
+ if d.current.err != nil {
+ println("error after:", d.current.err)
+ return false
+ }
+ d.current.b = d.frame.history.b[histBefore:]
+ if debugDecoder {
+ println("history after:", len(d.frame.history.b))
+ }
+
+ // Check frame size (before CRC)
+ d.syncStream.decodedFrame += uint64(len(d.current.b))
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeExceeded
+ return false
+ }
+
+ // Check FCS
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeMismatch
+ return false
+ }
+
+ // Update/Check CRC
+ if d.frame.HasCheckSum {
+ if !d.o.ignoreChecksum {
+ d.frame.crc.Write(d.current.b)
+ }
+ if d.current.d.Last {
+ if !d.o.ignoreChecksum {
+ d.current.err = d.frame.checkCRC()
+ } else {
+ d.current.err = d.frame.consumeCRC()
+ }
+ if d.current.err != nil {
+ println("CRC error:", d.current.err)
+ return false
+ }
+ }
+ }
+ d.syncStream.inFrame = !d.current.d.Last
+ }
+ return true
+}
+
+func (d *Decoder) stashDecoder() {
+ if d.current.d != nil {
+ if debugDecoder {
+ printf("re-adding current decoder %p", d.current.d)
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+ if d.current.err == ErrDecoderClosed {
+ return
+ }
+ d.drainOutput()
+ if d.current.cancel != nil {
+ d.current.cancel()
+ d.streamWg.Wait()
+ d.current.cancel = nil
+ }
+ if d.decoders != nil {
+ close(d.decoders)
+ for dec := range d.decoders {
+ dec.Close()
+ }
+ d.decoders = nil
+ }
+ if d.current.d != nil {
+ d.current.d.Close()
+ d.current.d = nil
+ }
+ d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+ return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+ d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+ return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+ return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+ c.d.Close()
+ return nil
+}
+
+type decodeOutput struct {
+ d *blockDec
+ b []byte
+ err error
+}
+
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+ d.frame.history.reset()
+ d.syncStream.br = readerWrapper{r: r}
+ d.syncStream.inFrame = false
+ d.syncStream.enabled = true
+ d.syncStream.decodedFrame = 0
+ return nil
+}
+
+// Create Decoder:
+// ASYNC:
+// Spawn 3 goroutines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
+ defer d.streamWg.Done()
+ br := readerWrapper{r: r}
+
+ var seqDecode = make(chan *blockDec, d.o.concurrent)
+ var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+ // Async 1: Decode sequences...
+ go func() {
+ var hist history
+ var hasErr bool
+
+ for block := range seqDecode {
+ if hasErr {
+ if block != nil {
+ seqExecute <- block
+ }
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
+ }
+ hist.reset()
+ hist.decoders = block.async.newHist.decoders
+ hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqExecute <- block
+ continue
+ }
+
+ hist.decoders.literals = block.async.literals
+ block.err = block.prepareSequences(block.async.seqData, &hist)
+ if debugDecoder && block.err != nil {
+ println("prepareSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ if block.err == nil {
+ block.err = block.decodeSequences(&hist)
+ if debugDecoder && block.err != nil {
+ println("decodeSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
+ block.async.seqSize = hist.decoders.seqSize
+ }
+ seqExecute <- block
+ }
+ close(seqExecute)
+ hist.reset()
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // Async 2: Execute sequences...
+ frameHistCache := d.frame.history.b
+ go func() {
+ var hist history
+ var decodedFrame uint64
+ var fcs uint64
+ var hasErr bool
+ for block := range seqExecute {
+ out := decodeOutput{err: block.err, d: block}
+ if block.err != nil || hasErr {
+ hasErr = true
+ output <- out
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 2: new history")
+ }
+ hist.reset()
+ hist.windowSize = block.async.newHist.windowSize
+ hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+
+ if cap(hist.b) < hist.allocFrameBuffer {
+ if cap(frameHistCache) >= hist.allocFrameBuffer {
+ hist.b = frameHistCache
+ } else {
+ hist.b = make([]byte, 0, hist.allocFrameBuffer)
+ println("Alloc history sized", hist.allocFrameBuffer)
+ }
+ }
+ hist.b = hist.b[:0]
+ fcs = block.async.fcs
+ decodedFrame = 0
+ }
+ do := decodeOutput{err: block.err, d: block}
+ switch block.Type {
+ case blockTypeRLE:
+ if debugDecoder {
+ println("add rle block length:", block.RLESize)
+ }
+
+ if cap(block.dst) < int(block.RLESize) {
+ if block.lowMem {
+ block.dst = make([]byte, block.RLESize)
+ } else {
+ block.dst = make([]byte, maxCompressedBlockSize)
+ }
+ }
+ block.dst = block.dst[:block.RLESize]
+ v := block.data[0]
+ for i := range block.dst {
+ block.dst[i] = v
+ }
+ hist.append(block.dst)
+ do.b = block.dst
+ case blockTypeRaw:
+ if debugDecoder {
+ println("add raw block length:", len(block.data))
+ }
+ hist.append(block.data)
+ do.b = block.data
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("execute with history length:", len(hist.b), "window:", hist.windowSize)
+ }
+ hist.decoders.seqSize = block.async.seqSize
+ hist.decoders.literals = block.async.literals
+ do.err = block.executeSequences(&hist)
+ hasErr = do.err != nil
+ if debugDecoder && hasErr {
+ println("executeSequences returned:", do.err)
+ }
+ do.b = block.dst
+ }
+ if !hasErr {
+ decodedFrame += uint64(len(do.b))
+ if decodedFrame > fcs {
+ println("fcs exceeded", block.Last, fcs, decodedFrame)
+ do.err = ErrFrameSizeExceeded
+ hasErr = true
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
+ do.err = ErrFrameSizeMismatch
+ hasErr = true
+ } else {
+ if debugDecoder {
+ println("fcs ok", block.Last, fcs, decodedFrame)
+ }
+ }
+ }
+ output <- do
+ }
+ close(output)
+ frameHistCache = hist.b
+ wg.Done()
+ if debugDecoder {
+ println("decoder goroutines finished")
+ }
+ hist.reset()
+ }()
+
+ var hist history
+decodeStream:
+ for {
+ var hasErr bool
+ hist.reset()
+ decodeBlock := func(block *blockDec) {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
+ }
+ return
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ return
+ }
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
+ }
+ frame := d.frame
+ if debugDecoder {
+ println("New frame...")
+ }
+ var historySent bool
+ frame.history.reset()
+ err := frame.reset(&br)
+ if debugDecoder && err != nil {
+ println("Frame decoder returned", err)
+ }
+ if err == nil {
+ err = d.setDict(frame)
+ }
+ if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
+ err = ErrDecoderSizeExceeded
+ }
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ case dec := <-d.decoders:
+ dec.sendErr(err)
+ decodeBlock(dec)
+ }
+ break decodeStream
+ }
+
+ // Go through all blocks of the frame.
+ for {
+ var dec *blockDec
+ select {
+ case <-ctx.Done():
+ break decodeStream
+ case dec = <-d.decoders:
+ // Once we have a decoder, we MUST return it.
+ }
+ err := frame.next(dec)
+ if !historySent {
+ h := frame.history
+ if debugDecoder {
+ println("Alloc History:", h.allocFrameBuffer)
+ }
+ hist.reset()
+ if h.dict != nil {
+ hist.setDict(h.dict)
+ }
+ dec.async.newHist = &h
+ dec.async.fcs = frame.FrameContentSize
+ historySent = true
+ } else {
+ dec.async.newHist = nil
+ }
+ if debugDecoder && err != nil {
+ println("next block returned error:", err)
+ }
+ dec.err = err
+ dec.hasCRC = false
+ if dec.Last && frame.HasCheckSum && err == nil {
+ crc, err := frame.rawInput.readSmall(4)
+ if len(crc) < 4 {
+ if err == nil {
+ err = io.ErrUnexpectedEOF
+
+ }
+ println("CRC missing?", err)
+ dec.err = err
+ } else {
+ dec.checkCRC = binary.LittleEndian.Uint32(crc)
+ dec.hasCRC = true
+ if debugDecoder {
+ printf("found crc to check: %08x\n", dec.checkCRC)
+ }
+ }
+ }
+ err = dec.err
+ last := dec.Last
+ decodeBlock(dec)
+ if err != nil {
+ break decodeStream
+ }
+ if last {
+ break
+ }
+ }
+ }
+ close(seqDecode)
+ wg.Wait()
+ hist.reset()
+ d.frame.history.b = frameHistCache
+}
+
+func (d *Decoder) setDict(frame *frameDec) (err error) {
+ dict, ok := d.dicts[frame.DictionaryID]
+ if ok {
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
+ frame.history.setDict(dict)
+ } else if frame.DictionaryID != 0 {
+ // A zero or missing dictionary id is ambiguous:
+ // either dictionary zero, or no dictionary. In particular,
+ // zstd --patch-from uses this id for the source file,
+ // so only return an error if the dictionary id is not zero.
+ err = ErrUnknownDictionary
+ }
+ return err
+}
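
For context, a minimal sketch of how the Decoder defined above is typically used, covering the stateless DecodeAll path and the streaming Reset path. The payload and variable names are made up for illustration; this snippet is not part of the vendored file.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress a small payload so there is something to decode.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// Stateless decode: DecodeAll appends to dst and may be called concurrently.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	out, err := dec.DecodeAll(buf.Bytes(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stateless: %q\n", out)

	// Streaming decode: Reset switches the decoder to a new stream,
	// and IOReadCloser exposes it as a plain io.ReadCloser.
	if err := dec.Reset(bytes.NewReader(buf.Bytes())); err != nil {
		log.Fatal(err)
	}
	streamed, err := io.ReadAll(dec.IOReadCloser())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("streaming: %q\n", streamed)
}
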
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
new file mode 100644
index 000000000..774c5f00f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -0,0 +1,169 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math/bits"
+ "runtime"
+)
+
+// DOption is an option for creating a decoder.
+type DOption func(*decoderOptions) error
+
+// options retains accumulated state of multiple options.
+type decoderOptions struct {
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+ maxWindowSize uint64
+ dicts []*dict
+ ignoreChecksum bool
+ limitToCap bool
+ decodeBufsBelow int
+}
+
+func (o *decoderOptions) setDefault() {
+ *o = decoderOptions{
+ // use less ram: true for now, but may change.
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ maxWindowSize: MaxWindowSize,
+ decodeBufsBelow: 128 << 10,
+ }
+ if o.concurrent > 4 {
+ o.concurrent = 4
+ }
+ o.maxDecodedSize = 64 << 30
+}
+
+// WithDecoderLowmem will set whether to use a lower amount of memory,
+// but possibly have to allocate more while running.
+func WithDecoderLowmem(b bool) DOption {
+ return func(o *decoderOptions) error { o.lowMem = b; return nil }
+}
+
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and the maximum is set to 1,
+// no async decoding will be done.
+// When a value of 0 is provided, GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
+func WithDecoderConcurrency(n int) DOption {
+ return func(o *decoderOptions) error {
+ if n < 0 {
+ return errors.New("concurrency must be at least 1")
+ }
+ if n == 0 {
+ o.concurrent = runtime.GOMAXPROCS(0)
+ } else {
+ o.concurrent = n
+ }
+ return nil
+ }
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or a maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
+func WithDecoderMaxMemory(n uint64) DOption {
+ return func(o *decoderOptions) error {
+ if n == 0 {
+ return errors.New("WithDecoderMaxMemory must be at least 1")
+ }
+ if n > 1<<63 {
+ return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
+ }
+ o.maxDecodedSize = n
+ return nil
+ }
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+//
+// Each slice in dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithDecoderDicts(dicts ...[]byte) DOption {
+ return func(o *decoderOptions) error {
+ for _, b := range dicts {
+ d, err := loadDict(b)
+ if err != nil {
+ return err
+ }
+ o.dicts = append(o.dicts, d)
+ }
+ return nil
+ }
+}
+
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+ return func(o *decoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+ return nil
+ }
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting packets that would cause excessive memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used instead.
+// Default is 512MB; the maximum is ~3.75 TB, as per the zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+ return func(o *decoderOptions) error {
+ if size < MinWindowSize {
+ return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+ }
+ if size > (1<<41)+7*(1<<38) {
+ return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+ }
+ o.maxWindowSize = size
+ return nil
+ }
+}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses fewer allocations, but the full decompressed object will be held in memory.
+// Note that DecodeAllCapLimit will disable this, as will passing a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+ return func(o *decoderOptions) error {
+ o.decodeBufsBelow = size
+ return nil
+ }
+}
+
+// IgnoreChecksum allows forcibly ignoring checksum verification.
+func IgnoreChecksum(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.ignoreChecksum = b
+ return nil
+ }
+}
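
The decoder options above compose as plain functional options passed to NewReader. A short sketch, with arbitrary example values rather than recommendations:

package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderConcurrency(2),    // limit concurrent block decodes / inflight blocks
		zstd.WithDecoderMaxMemory(1<<30),  // cap non-streaming decoded size at 1 GiB
		zstd.WithDecoderMaxWindow(64<<20), // reject frames with windows larger than 64 MiB
		zstd.WithDecoderLowmem(true),      // prefer smaller buffers over fewer allocations
		zstd.IgnoreChecksum(false),        // keep checksum verification enabled
	)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	log.Println("decoder configured")
}
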
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 000000000..b7b83164b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,565 @@
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+ id uint32
+
+ litEnc *huff0.Scratch
+ llDec, ofDec, mlDec sequenceDec
+ offsets [3]int
+ content []byte
+}
+
+const dictMagic = "\x37\xa4\x30\xec"
+
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+ if d == nil {
+ return 0
+ }
+ return d.id
+}
+
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
+ if d == nil {
+ return 0
+ }
+ return len(d.content)
+}
+
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+ if d == nil {
+ return nil
+ }
+ return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+ if d == nil {
+ return [3]int{}
+ }
+ return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+ if d == nil {
+ return nil
+ }
+ return d.litEnc
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+ // Check static field size.
+ if len(b) <= 8+(3*4) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ d := dict{
+ llDec: sequenceDec{fse: &fseDecoder{}},
+ ofDec: sequenceDec{fse: &fseDecoder{}},
+ mlDec: sequenceDec{fse: &fseDecoder{}},
+ }
+ if string(b[:4]) != dictMagic {
+ return nil, ErrMagicMismatch
+ }
+ d.id = binary.LittleEndian.Uint32(b[4:8])
+ if d.id == 0 {
+ return nil, errors.New("dictionaries cannot have ID 0")
+ }
+
+ // Read literal table
+ var err error
+ d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
+ if err != nil {
+ return nil, fmt.Errorf("loading literal table: %w", err)
+ }
+ d.litEnc.Reuse = huff0.ReusePolicyMust
+
+ br := byteReader{
+ b: b,
+ off: 0,
+ }
+ readDec := func(i tableIndex, dec *fseDecoder) error {
+ if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
+ return err
+ }
+ if br.overread() {
+ return io.ErrUnexpectedEOF
+ }
+ err = dec.transform(symbolTableX[i])
+ if err != nil {
+ println("Transform table error:", err)
+ return err
+ }
+ if debugDecoder || debugEncoder {
+ println("Read table ok", "symbolLen:", dec.symbolLen)
+ }
+ // Set decoders as predefined so they aren't reused.
+ dec.preDefined = true
+ return nil
+ }
+
+ if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
+ return nil, err
+ }
+ if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
+ return nil, err
+ }
+ if br.remain() < 12 {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ d.offsets[0] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[1] = int(br.Uint32())
+ br.advance(4)
+ d.offsets[2] = int(br.Uint32())
+ br.advance(4)
+ if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
+ return nil, errors.New("invalid offset in dictionary")
+ }
+ d.content = make([]byte, br.remain())
+ copy(d.content, br.unread())
+ if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
+ return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
+ }
+
+ return &d, nil
+}
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+ ID() uint32
+ ContentSize() int
+ Content() []byte
+ Offsets() [3]int
+ LitEncoder() *huff0.Scratch
+}, error) {
+ initPredefined()
+ d, err := loadDict(b)
+ return d, err
+}
+
+type BuildDictOptions struct {
+ // Dictionary ID.
+ ID uint32
+
+ // Content to use to create dictionary tables.
+ Contents [][]byte
+
+ // History to use for all blocks.
+ History []byte
+
+ // Offsets to use.
+ Offsets [3]int
+
+ // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+ // See https://github.com/facebook/zstd/issues/3724
+ CompatV155 bool
+
+ // Use the specified encoder level.
+ // The dictionary will be built using the specified encoder level,
+ // which will reflect speed and make the dictionary tailored for that level.
+ // If not set SpeedBestCompression will be used.
+ Level EncoderLevel
+
+ // DebugOut will write stats and other details here if set.
+ DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+ initPredefined()
+ hist := o.History
+ contents := o.Contents
+ debug := o.DebugOut != nil
+ println := func(args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprintln(o.DebugOut, args...)
+ }
+ }
+ printf := func(s string, args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprintf(o.DebugOut, s, args...)
+ }
+ }
+ print := func(args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprint(o.DebugOut, args...)
+ }
+ }
+
+ if int64(len(hist)) > dictMaxLength {
+ return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+ }
+ if len(hist) < 8 {
+ return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+ }
+ if len(contents) == 0 {
+ return nil, errors.New("no content provided")
+ }
+ d := dict{
+ id: o.ID,
+ litEnc: nil,
+ llDec: sequenceDec{},
+ ofDec: sequenceDec{},
+ mlDec: sequenceDec{},
+ offsets: o.Offsets,
+ content: hist,
+ }
+ block := blockEnc{lowMem: false}
+ block.init()
+ enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+ if o.Level != 0 {
+ eOpts := encoderOptions{
+ level: o.Level,
+ blockSize: maxMatchLen,
+ windowSize: maxMatchLen,
+ dict: &d,
+ lowMem: false,
+ }
+ enc = eOpts.encoder()
+ } else {
+ o.Level = SpeedBestCompression
+ }
+ var (
+ remain [256]int
+ ll [256]int
+ ml [256]int
+ of [256]int
+ )
+ addValues := func(dst *[256]int, src []byte) {
+ for _, v := range src {
+ dst[v]++
+ }
+ }
+ addHist := func(dst *[256]int, src *[256]uint32) {
+ for i, v := range src {
+ dst[i] += int(v)
+ }
+ }
+ seqs := 0
+ nUsed := 0
+ litTotal := 0
+ newOffsets := make(map[uint32]int, 1000)
+ for _, b := range contents {
+ block.reset(nil)
+ if len(b) < 8 {
+ continue
+ }
+ nUsed++
+ enc.Reset(&d, true)
+ enc.Encode(&block, b)
+ addValues(&remain, block.literals)
+ litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
+ seqs += len(block.sequences)
+ block.genCodes()
+ addHist(&ll, block.coders.llEnc.Histogram())
+ addHist(&ml, block.coders.mlEnc.Histogram())
+ addHist(&of, block.coders.ofEnc.Histogram())
+ for i, seq := range block.sequences {
+ if i > 3 {
+ break
+ }
+ offset := seq.offset
+ if offset == 0 {
+ continue
+ }
+ if int(offset) >= len(o.History) {
+ continue
+ }
+ if offset > 3 {
+ newOffsets[offset-3]++
+ } else {
+ newOffsets[uint32(o.Offsets[offset-1])]++
+ }
+ }
+ }
+ // Find most used offsets.
+ var sortedOffsets []uint32
+ for k := range newOffsets {
+ sortedOffsets = append(sortedOffsets, k)
+ }
+ sort.Slice(sortedOffsets, func(i, j int) bool {
+ a, b := sortedOffsets[i], sortedOffsets[j]
+ if a == b {
+ // Prefer the longer offset
+ return sortedOffsets[i] > sortedOffsets[j]
+ }
+ return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
+ })
+ if len(sortedOffsets) > 3 {
+ if debug {
+ print("Offsets:")
+ for i, v := range sortedOffsets {
+ if i > 20 {
+ break
+ }
+ printf("[%d: %d],", v, newOffsets[v])
+ }
+ println("")
+ }
+
+ sortedOffsets = sortedOffsets[:3]
+ }
+ for i, v := range sortedOffsets {
+ o.Offsets[i] = int(v)
+ }
+ if debug {
+ println("New repeat offsets", o.Offsets)
+ }
+
+ if nUsed == 0 || seqs == 0 {
+ return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+ }
+ if debug {
+ println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+ }
+ if seqs/nUsed < 512 {
+ // Use 512 as minimum.
+ nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
+ }
+ copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+ hist := dst.Histogram()
+ var maxSym uint8
+ var maxCount int
+ var fakeLength int
+ for i, v := range src {
+ if v > 0 {
+ v = v / nUsed
+ if v == 0 {
+ v = 1
+ }
+ }
+ if v > maxCount {
+ maxCount = v
+ }
+ if v != 0 {
+ maxSym = uint8(i)
+ }
+ fakeLength += v
+ hist[i] = uint32(v)
+ }
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
+ dst.HistogramFinished(maxSym, maxCount)
+ dst.reUsed = false
+ dst.useRLE = false
+ err := dst.normalizeCount(fakeLength)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+ }
+ return dst.writeCount(nil)
+ }
+ if debug {
+ print("Literal lengths: ")
+ }
+ llTable, err := copyHist(block.coders.llEnc, &ll)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ print("Match lengths: ")
+ }
+ mlTable, err := copyHist(block.coders.mlEnc, &ml)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ print("Offsets: ")
+ }
+ ofTable, err := copyHist(block.coders.ofEnc, &of)
+ if err != nil {
+ return nil, err
+ }
+
+ // Literal table
+ avgSize := litTotal
+ if avgSize > huff0.BlockSizeMax/2 {
+ avgSize = huff0.BlockSizeMax / 2
+ }
+ huffBuff := make([]byte, 0, avgSize)
+ // Target size
+ div := litTotal / avgSize
+ if div < 1 {
+ div = 1
+ }
+ if debug {
+ println("Huffman weights:")
+ }
+ for i, n := range remain[:] {
+ if n > 0 {
+ n = n / div
+ // Allow all entries to be represented.
+ if n == 0 {
+ n = 1
+ }
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ if debug {
+ printf("[%d: %d], ", i, n)
+ }
+ }
+ }
+ if o.CompatV155 && remain[255]/div == 0 {
+ huffBuff = append(huffBuff, 255)
+ }
+ scratch := &huff0.Scratch{TableLog: 11}
+ for tries := 0; tries < 255; tries++ {
+ scratch = &huff0.Scratch{TableLog: 11}
+ _, _, err = huff0.Compress1X(huffBuff, scratch)
+ if err == nil {
+ break
+ }
+ if debug {
+ printf("Try %d: Huffman error: %v\n", tries+1, err)
+ }
+ huffBuff = huffBuff[:0]
+ if tries == 250 {
+ if debug {
+ println("Huffman: Bailing out with predefined table")
+ }
+
+ // Bail out.... Just generate something
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+ for i := 0; i < 128; i++ {
+ huffBuff = append(huffBuff, byte(i))
+ }
+ continue
+ }
+ if errors.Is(err, huff0.ErrIncompressible) {
+ // Try truncating least common.
+ for i, n := range remain[:] {
+ if n > 0 {
+ n = n / (div * (i + 1))
+ if n > 0 {
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ }
+ }
+ }
+ if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+ huffBuff = append(huffBuff, 255)
+ }
+ if len(huffBuff) == 0 {
+ huffBuff = append(huffBuff, 0, 255)
+ }
+ }
+ if errors.Is(err, huff0.ErrUseRLE) {
+ for i, n := range remain[:] {
+ n = n / (div * (i + 1))
+ // Allow all entries to be represented.
+ if n == 0 {
+ n = 1
+ }
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ }
+ }
+ }
+
+ var out bytes.Buffer
+ out.Write([]byte(dictMagic))
+ out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+ out.Write(scratch.OutTable)
+ if debug {
+ println("huff table:", len(scratch.OutTable), "bytes")
+ println("of table:", len(ofTable), "bytes")
+ println("ml table:", len(mlTable), "bytes")
+ println("ll table:", len(llTable), "bytes")
+ }
+ out.Write(ofTable)
+ out.Write(mlTable)
+ out.Write(llTable)
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+ out.Write(hist)
+ if debug {
+ _, err := loadDict(out.Bytes())
+ if err != nil {
+ panic(err)
+ }
+ i, err := InspectDictionary(out.Bytes())
+ if err != nil {
+ panic(err)
+ }
+ println("ID:", i.ID())
+ println("Content size:", i.ContentSize())
+ println("Encoder:", i.LitEncoder() != nil)
+ println("Offsets:", i.Offsets())
+ var totalSize int
+ for _, b := range contents {
+ totalSize += len(b)
+ }
+
+ encWith := func(opts ...EOption) int {
+ enc, err := NewWriter(nil, opts...)
+ if err != nil {
+ panic(err)
+ }
+ defer enc.Close()
+ var dst []byte
+ var totalSize int
+ for _, b := range contents {
+ dst = enc.EncodeAll(b, dst[:0])
+ totalSize += len(dst)
+ }
+ return totalSize
+ }
+ plain := encWith(WithEncoderLevel(o.Level))
+ withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+ println("Input size:", totalSize)
+ println("Plain Compressed:", plain)
+ println("Dict Compressed:", withDict)
+ println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+ }
+ return out.Bytes(), nil
+}
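
BuildDict, InspectDictionary and the decoder-side dictionary options from this file can be wired together as sketched below. The sample payloads and dictionary ID are invented for illustration (tiny samples will not produce a useful dictionary in practice); the flow is build, inspect, encode with WithEncoderDict, then decode with the dictionary registered via WithDecoderDicts.

package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Sample payloads standing in for real training data.
	samples := [][]byte{
		[]byte(`{"level":"info","msg":"disk pressure cleared"}`),
		[]byte(`{"level":"warn","msg":"disk pressure detected"}`),
		[]byte(`{"level":"info","msg":"kernel deadlock check ok"}`),
	}

	// Build a dictionary from the samples. History seeds the content section
	// and must be at least 8 bytes; Offsets must be non-zero.
	dictBytes, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       12345,
		Contents: samples,
		History:  append([]byte(nil), samples[0]...),
		Offsets:  [3]int{1, 4, 8},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Inspect the result.
	info, err := zstd.InspectDictionary(dictBytes)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("dictionary id:", info.ID(), "content size:", info.ContentSize())

	// Compress with the dictionary...
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dictBytes))
	if err != nil {
		log.Fatal(err)
	}
	compressed := enc.EncodeAll(samples[1], nil)
	_ = enc.Close()

	// ...and register it with a decoder so frames referencing the ID decode.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dictBytes))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("roundtrip: %q", out)
}
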
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 000000000..5ca46038a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,173 @@
+package zstd
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+ dictShardBits = 6
+)
+
+type fastBase struct {
+ // cur is the offset at the start of hist
+ cur int32
+ // maximum offset. Should be at least 2x block size.
+ maxMatchOff int32
+ bufferReset int32
+ hist []byte
+ crc *xxhash.Digest
+ tmp [8]byte
+ blk *blockEnc
+ lastDictID uint32
+ lowMem bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+ return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+ crc := e.crc.Sum(e.tmp[:0])
+ dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+ return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int64) int32 {
+ if size > 0 && size < int64(e.maxMatchOff) {
+ b := int32(1) << uint(bits.Len(uint(size)))
+ // Keep minimum window.
+ if b < 1024 {
+ b = 1024
+ }
+ return b
+ }
+ return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+ return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+ if debugAsserts && e.cur > e.bufferReset {
+ panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
+ }
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.ensureHist(len(src))
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+ panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+ if cap(e.hist) >= n {
+ return
+ }
+ l := e.maxMatchOff
+ if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+ l += maxCompressedBlockSize
+ } else {
+ l += e.maxMatchOff
+ }
+ // Make it at least 1MB.
+ if l < 1<<20 && !e.lowMem {
+ l = 1 << 20
+ }
+ // Make it at least the requested size.
+ if l < int32(n) {
+ l = int32(n)
+ }
+ e.hist = make([]byte, 0, l)
+}
+
+// useBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("t (%d) < 0", t)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{lowMem: e.lowMem}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ e.blk.dictLitEnc = nil
+ if d != nil {
+ low := e.lowMem
+ if singleBlock {
+ e.lowMem = true
+ }
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
+ e.lowMem = low
+ }
+
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < e.bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litenc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
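
fastBase.WindowSize above rounds a known input size up to a power of two with a 1 KiB floor, and otherwise falls back to the encoder's maximum offset. A standalone sketch of that calculation (not the vendored code itself):

package main

import (
	"fmt"
	"math/bits"
)

// windowFor mirrors the rounding in fastBase.WindowSize: for a known input
// size below the encoder's maximum offset, use a power of two covering the
// size, but never less than 1 KiB.
func windowFor(size, maxMatchOff int64) int64 {
	if size > 0 && size < maxMatchOff {
		b := int64(1) << uint(bits.Len(uint(size)))
		if b < 1024 {
			b = 1024
		}
		return b
	}
	return maxMatchOff
}

func main() {
	for _, sz := range []int64{100, 4_000, 70_000, 10 << 20} {
		fmt.Printf("input %8d -> window %8d\n", sz, windowFor(sz, 8<<20))
	}
}
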
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 000000000..4613724e9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,560 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/klauspost/compress"
+)
+
+const (
+ bestLongTableBits = 22 // Bits used in the long match table
+ bestLongTableSize = 1 << bestLongTableBits // Size of the table
+ bestLongLen = 8 // Bytes used for table hash
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
+ bestShortTableBits = 18 // Bits used in the short match table
+ bestShortTableSize = 1 << bestShortTableBits // Size of the table
+ bestShortLen = 4 // Bytes used for table hash
+
+)
+
+type match struct {
+ offset int32
+ s int32
+ length int32
+ rep int32
+ est int32
+}
+
+const highScore = maxMatchLen * 8
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+ mlc := mlCode(uint32(m.length - zstdMinMatch))
+ var ofc uint8
+ if m.rep < 0 {
+ ofc = ofCode(uint32(m.s-m.offset) + 3)
+ } else {
+ ofc = ofCode(uint32(m.rep) & 3)
+ }
+ // Cost, excluding
+ ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+ // Add cost of match encoding...
+ m.est = int32(ofTT.outBits + mlTT.outBits)
+ m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+ // Subtract savings compared to literal encoding...
+ m.est -= (m.length * bitsPerByte) >> 10
+ if m.est > 0 {
+ // Unlikely gain..
+ m.length = 0
+ m.est = highScore
+ }
+}
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+ fastBase
+ table [bestShortTableSize]prevEntry
+ longTable [bestLongTableSize]prevEntry
+ dictTable []prevEntry
+ dictLongTable []prevEntry
+}
+
+// Encode improves compression...
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (4)
+ inputMargin = 8 + 4
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [bestShortTableSize]prevEntry{}
+ e.longTable = [bestLongTableSize]prevEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ v2 := e.table[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.table[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ // Add block to history
+ s := e.addBlock(src)
+ blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Use this to estimate literal cost.
+ // Scaled by 10 bits.
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
+ // Huffman can never go < 1 bit/byte
+ if bitsPerByte < 1024 {
+ bitsPerByte = 1024
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ const kSearchStrength = 10
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+ offset3 := int32(blk.recentOffsets[2])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ const goodEnough = 250
+
+ cv := load6432(src, s)
+
+ nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
+ nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ // Set m to a match at offset if it looks like that will improve compression.
+ improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
+ delta := s - offset
+ if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
+ return
+ }
+ // Try to quick reject if we already have a long match.
+ if m.length > 16 {
+ left := len(src) - int(m.s+m.length)
+ // If we are too close to the end, keep as is.
+ if left <= 0 {
+ return
+ }
+ checkLen := m.length - (s - m.s) - 8
+ if left > 2 && checkLen > 4 {
+ // Check 4 bytes, 4 bytes from the end of the current match.
+ a := load3232(src, offset+checkLen)
+ b := load3232(src, s+checkLen)
+ if a != b {
+ return
+ }
+ }
+ }
+ l := 4 + e.matchlen(s+4, offset+4, src)
+ if m.rep <= 0 {
+ // Extend candidate match backwards as far as possible.
+ // Do not extend repeats as we can assume they are optimal
+ // and offsets change if s == nextEmit.
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+ s--
+ offset--
+ l++
+ }
+ }
+ if debugAsserts {
+ if offset >= s {
+ panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+ }
+ if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+ panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ }
+ }
+ cand := match{offset: offset, s: s, length: l, rep: rep}
+ cand.estBits(bitsPerByte)
+ if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+ *m = cand
+ }
+ }
+
+ best := match{s: s, est: highScore}
+ improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
+
+ if canRepeat && best.length < goodEnough {
+ if s == nextEmit {
+ // Check repeats straight after a match.
+ improve(&best, s-offset2, s, uint32(cv), 1|4)
+ improve(&best, s-offset3, s, uint32(cv), 2|4)
+ if offset1 > 1 {
+ improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+ }
+ }
+
+ // If either no match or a non-repeat match, check at + 1
+ if best.rep <= 0 {
+ cv32 := uint32(cv >> 8)
+ spp := s + 1
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ if best.rep < 0 {
+ cv32 = uint32(cv >> 24)
+ spp += 2
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ }
+ }
+ }
+ // Load next and check...
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
+ e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+ index0 := s + 1
+
+ // Look far ahead, unless we have a really long match already...
+ if best.length < goodEnough {
+ // No match found, move forward on input, no need to check forward...
+ if best.length < 4 {
+ s += 1 + (s-nextEmit)>>(kSearchStrength-1)
+ if s >= sLimit {
+ break encodeLoop
+ }
+ continue
+ }
+
+ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
+ cv = load6432(src, s+1)
+ cv2 := load6432(src, s+2)
+ candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
+ candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
+
+ // Short at s+1
+ improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
+ // Long at s+1, s+2
+ improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+ improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
+ if false {
+ // Short at s+3.
+ // Too often worse...
+ improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
+ }
+
+ // Start check at a fixed offset to allow for a few mismatches.
+ // For this compression level 2 yields the best results.
+ // We cannot do this if we have already indexed this position.
+ const skipBeginning = 2
+ if best.s > s-skipBeginning {
+ // See if we can find a better match by checking where the current best ends.
+ // Use that offset to see if we can find a better full match.
+ if sAt := best.s + best.length; sAt < sLimit {
+ nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+ candidateEnd := e.longTable[nextHashL]
+
+ if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ }
+ }
+ }
+ }
+ }
+
+ if debugAsserts {
+ if best.offset >= best.s {
+ panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+ }
+ if best.s < nextEmit {
+ panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+ }
+ if best.offset < s-e.maxMatchOff {
+ panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+ }
+ if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
+ panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
+ }
+ }
+
+ // We have a match, we can store the forward value
+ s = best.s
+ if best.rep > 0 {
+ var seq seq
+ seq.matchLen = uint32(best.length - zstdMinMatch)
+ addLiterals(&seq, best.s)
+
+ // Repeat. If bit 4 is set, this is a non-lit repeat.
+ seq.offset = uint32(best.rep & 3)
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index old s + 1 -> s - 1
+ s = best.s + best.length
+ nextEmit = s
+
+ // Index skipped...
+ end := s
+ if s > sLimit+4 {
+ end = sLimit + 4
+ }
+ off := index0 + e.cur
+ for index0 < end {
+ cv0 := load6432(src, index0)
+ h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+ h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+ off++
+ index0++
+ }
+
+ switch best.rep {
+ case 2, 4 | 1:
+ offset1, offset2 = offset2, offset1
+ case 3, 4 | 2:
+ offset1, offset2, offset3 = offset3, offset1, offset2
+ case 4 | 3:
+ offset1, offset2, offset3 = offset1-1, offset1, offset2
+ }
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, best.length)
+ }
+ break encodeLoop
+ }
+ continue
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ t := best.offset
+ offset1, offset2, offset3 = s-t, offset1, offset2
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Write our sequence
+ var seq seq
+ l := best.length
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+
+ // Index old s + 1 -> s - 1 or sLimit
+ end := s
+ if s > sLimit-4 {
+ end = sLimit - 4
+ }
+
+ off := index0 + e.cur
+ for index0 < end {
+ cv0 := load6432(src, index0)
+ h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
+ h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
+ index0++
+ off++
+ }
+ if s >= sLimit {
+ break encodeLoop
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ blk.recentOffsets[2] = uint32(offset3)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
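+
Editorially, the switch on best.rep above rotates the three recent offsets after a repeat match; values with bit 4 set mark repeats that were emitted with zero preceding literals, which shifts the selection by one. The following standalone sketch (not part of the vendored file) mirrors those cases in isolation:

package main

import "fmt"

// rotateRecent mirrors the recent-offset update after a repeat match.
// rep selects which stored offset was used; bit 4 (4|x) marks a repeat
// found with no preceding literals.
func rotateRecent(rep int, o1, o2, o3 int32) (int32, int32, int32) {
	switch rep {
	case 2, 4 | 1:
		o1, o2 = o2, o1
	case 3, 4 | 2:
		o1, o2, o3 = o3, o1, o2
	case 4 | 3:
		o1, o2, o3 = o1-1, o1, o2
	}
	return o1, o2, o3
}

func main() {
	// Using the third recent offset moves it to the front.
	fmt.Println(rotateRecent(3, 11, 22, 33)) // 33 11 22
}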
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ e.ensureHist(len(src))
+ e.Encode(blk, src)
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]prevEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = bestShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
+ e.dictTable[nextHash] = prevEntry{
+ prev: e.dictTable[nextHash].offset,
+ offset: i,
+ }
+ e.dictTable[nextHash1] = prevEntry{
+ prev: e.dictTable[nextHash1].offset,
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = prevEntry{
+ prev: e.dictTable[nextHash2].offset,
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = prevEntry{
+ prev: e.dictTable[nextHash3].offset,
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hashLen(cv, bestLongTableBits, bestLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hashLen(cv, bestLongTableBits, bestLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ }
+ // Reset long table to initial state
+ copy(e.longTable[:], e.dictLongTable)
+
+ e.cur = e.maxMatchOff
+ // Reset short table to initial state
+ copy(e.table[:], e.dictTable)
+}
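+
These vendored encoders are not called directly; they are selected internally by the encoder level option. A minimal usage sketch, assuming the public klauspost/compress/zstd API (NewWriter, WithEncoderLevel, SpeedBestCompression, EncodeAll):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// SpeedBestCompression selects the bestFastEncoder shown above;
	// SpeedBetterCompression selects the betterFastEncoder added below.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	compressed := enc.EncodeAll([]byte("some repetitive input, some repetitive input"), nil)
	fmt.Println("compressed size:", len(compressed))
}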
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
new file mode 100644
index 000000000..a4f5bf91f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -0,0 +1,1252 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ betterLongTableBits = 19 // Bits used in the long match table
+ betterLongTableSize = 1 << betterLongTableBits // Size of the table
+ betterLongLen = 8 // Bytes used for table hash
+
+ // Note: Increasing the short table bits or making the hash shorter
+ // can actually lead to compression degradation since it will 'steal' more from the
+ // long match table and match offsets are quite big.
+ // This greatly depends on the type of input.
+ betterShortTableBits = 13 // Bits used in the short match table
+ betterShortTableSize = 1 << betterShortTableBits // Size of the table
+ betterShortLen = 5 // Bytes used for table hash
+
+ betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
+ betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
+
+ betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table
+ betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
+)
+
+type prevEntry struct {
+ offset int32
+ prev int32
+}
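+
For a sense of scale, the table geometry declared in the const block above translates into a few megabytes of state per encoder. The sketch below is illustrative only; prevEntry is mirrored from just above, and the tableEntry layout ({val uint32, offset int32}, defined elsewhere in the package) is an assumption:

package main

import (
	"fmt"
	"unsafe"
)

// Local mirrors of the table entry types used by the encoders.
type tableEntry struct {
	val    uint32
	offset int32
}

type prevEntry struct {
	offset int32
	prev   int32
}

func main() {
	const (
		betterLongTableBits  = 19
		betterShortTableBits = 13
	)
	longBytes := (1 << betterLongTableBits) * unsafe.Sizeof(prevEntry{})
	shortBytes := (1 << betterShortTableBits) * unsafe.Sizeof(tableEntry{})
	fmt.Printf("long table: %d KiB, short table: %d KiB\n", longBytes/1024, shortBytes/1024)
	// => long table: 4096 KiB, short table: 64 KiB
}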
+
+// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type betterFastEncoder struct {
+ fastBase
+ table [betterShortTableSize]tableEntry
+ longTable [betterLongTableSize]prevEntry
+}
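+
The prevEntry long table described in the comment above keeps the previously stored offset alongside the new one, giving each hash bucket an implicit chain of length 2. A self-contained sketch of that insert/lookup pattern (illustrative, not the package's code):

package main

import "fmt"

type prevEntry struct {
	offset int32
	prev   int32
}

// insert stores a new offset and demotes the old one to prev,
// so a lookup can compare two candidate positions per bucket.
func insert(table []prevEntry, h uint32, off int32) {
	table[h] = prevEntry{offset: off, prev: table[h].offset}
}

func main() {
	table := make([]prevEntry, 8)
	insert(table, 3, 100)
	insert(table, 3, 200) // same bucket: 100 is kept as prev
	fmt.Println(table[3]) // {200 100}
}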
+
+type betterFastEncoderDict struct {
+ betterFastEncoder
+ dictTable []tableEntry
+ dictLongTable []prevEntry
+ shortTableShardDirty [betterShortTableShardCnt]bool
+ longTableShardDirty [betterLongTableShardCnt]bool
+ allDirty bool
+}
+
+// Encode improves compression over the default (double-fast) encoder at some cost in speed.
+func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [betterShortTableSize]tableEntry{}
+ e.longTable = [betterLongTableSize]prevEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Add block to history
+ s := e.addBlock(src)
+ blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 9
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+ var matched, index0 int32
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ off := s + e.cur
+ e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+ e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+ index0 = s + 1
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index match start+1 (long) -> s - 1
+ index0 := s + repOff
+ s += length + repOff
+
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ const repOff2 = 1
+
+ // We deviate from the reference encoder and also check offset 2.
+ // Still slower and not much better, so disabled.
+ // repIndex = s - offset2 + repOff2
+ if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+ // Consider history as well.
+ var seq seq
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 2
+ seq.offset = 2
+ if debugSequences {
+ println("repeat sequence 2", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ s += length + repOff2
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ // Swap offsets
+ offset1, offset2 = offset2, offset1
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := candidateL.offset - e.cur
+ coffsetLP := candidateL.prev - e.cur
+
+ // Check if we have a long match.
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8
+ t = coffsetL
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+ if prevMatch > matched {
+ matched = prevMatch
+ t = coffsetLP
+ }
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ }
+ break
+ }
+
+ // Check if we have a long match on prev.
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+ t = coffsetLP
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ coffsetS := candidateS.offset - e.cur
+
+ // Check if we have a short match.
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = candidateL.offset - e.cur
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+ }
+
+ // Check prev long...
+ coffsetL = candidateL.prev - e.cur
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match (after short)")
+ }
+ break
+ }
+ }
+ t = coffsetS
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // Try to find a better match by searching for a long match at the end of the current best match
+ if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 3
+
+ nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
+ candidateL := e.longTable[nextHashL]
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ // Found a long match, at least 4 bytes.
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
+ if matchedNext > matched {
+ t = coffsetL
+ s = s2
+ matched = matchedNext
+ if debugMatches {
+ println("long match at end-of-match")
+ }
+ }
+ }
+
+ // Check prev long...
+ if true {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ // Found a long match, at least 4 bytes.
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
+ if matchedNext > matched {
+ t = coffsetL
+ s = s2
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match at end-of-match")
+ }
+ }
+ }
+ }
+ }
+ // A match has been found. Update recent offsets.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the n-byte match as long as possible.
+ l := matched
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) -> s - 1
+ off := index0 + e.cur
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ index0 += 2
+ off += 2
+ }
+
+ cv = load6432(src, s)
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+ e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
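+
The repeat check inside the function above compares load3232(src, repIndex) against uint32(cv>>(repOff*8)): since cv already holds the 8 bytes at s in little-endian order, shifting it right by 8 bits exposes the 4 bytes at s+1 without another load. A standalone sketch of that equivalence, using encoding/binary in place of the package's internal little-endian loaders:

package main

import (
	"encoding/binary"
	"fmt"
)

func load6432(b []byte, i int) uint64 { return binary.LittleEndian.Uint64(b[i:]) }
func load3232(b []byte, i int) uint32 { return binary.LittleEndian.Uint32(b[i:]) }

func main() {
	src := []byte("abcdefghij")
	s := 0
	cv := load6432(src, s) // 8 bytes starting at s

	const repOff = 1
	// The 4 bytes at s+repOff are already present in cv, shifted by repOff*8 bits.
	fmt.Println(load3232(src, s+repOff) == uint32(cv>>(repOff*8))) // true
}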
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ e.ensureHist(len(src))
+ e.Encode(blk, src)
+}
+
+// Encode improves compression over the default (double-fast) encoder at some cost in speed.
+func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = prevEntry{}
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = true
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ v2 := e.longTable[i].prev
+ if v < minOff {
+ v = 0
+ v2 = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ if v2 < minOff {
+ v2 = 0
+ } else {
+ v2 = v2 - e.cur + e.maxMatchOff
+ }
+ }
+ e.longTable[i] = prevEntry{
+ offset: v,
+ prev: v2,
+ }
+ }
+ e.allDirty = true
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 9
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+ var matched, index0 int32
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ off := s + e.cur
+ e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+ e.markShortShardDirty(nextHashS)
+ index0 = s + 1
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Index match start+1 (long) -> s - 1
+ s += length + repOff
+
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ const repOff2 = 1
+
+ // We deviate from the reference encoder and also check offset 2.
+ // Still slower and not much better, so disabled.
+ // repIndex = s - offset2 + repOff2
+ if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+ // Consider history as well.
+ var seq seq
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 2
+ seq.offset = 2
+ if debugSequences {
+ println("repeat sequence 2", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ s += length + repOff2
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+
+ // Index skipped...
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ off := index0 + e.cur
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ }
+ cv = load6432(src, s)
+ // Swap offsets
+ offset1, offset2 = offset2, offset1
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := candidateL.offset - e.cur
+ coffsetLP := candidateL.prev - e.cur
+
+ // Check if we have a long match.
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8
+ t = coffsetL
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+ if prevMatch > matched {
+ matched = prevMatch
+ t = coffsetLP
+ }
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ }
+ break
+ }
+
+ // Check if we have a long match on prev.
+ if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+ // Found a long match, at least 8 bytes.
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+ t = coffsetLP
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ coffsetS := candidateS.offset - e.cur
+
+ // Check if we have a short match.
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = candidateL.offset - e.cur
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+ e.markLongShardDirty(nextHashL)
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+ }
+
+ // Check prev long...
+ coffsetL = candidateL.prev - e.cur
+ if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+ // Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
+ if matchedNext > matched {
+ t = coffsetL
+ s += checkAt
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match (after short)")
+ }
+ break
+ }
+ }
+ t = coffsetS
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // Try to find a better match by searching for a long match at the end of the current best match
+ if s+matched < sLimit {
+ nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
+ cv := load3232(src, s)
+ candidateL := e.longTable[nextHashL]
+ coffsetL := candidateL.offset - e.cur - matched
+ if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ // Found a long match, at least 4 bytes.
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ if matchedNext > matched {
+ t = coffsetL
+ matched = matchedNext
+ if debugMatches {
+ println("long match at end-of-match")
+ }
+ }
+ }
+
+ // Check prev long...
+ if true {
+ coffsetL = candidateL.prev - e.cur - matched
+ if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ // Found a long match, at least 4 bytes.
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ if matchedNext > matched {
+ t = coffsetL
+ matched = matchedNext
+ if debugMatches {
+ println("prev long match at end-of-match")
+ }
+ }
+ }
+ }
+ }
+ // A match has been found. Update recent offsets.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the n-byte match as long as possible.
+ l := matched
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) -> s - 1
+ off := index0 + e.cur
+ for index0 < s-1 {
+ cv0 := load6432(src, index0)
+ cv1 := cv0 >> 8
+ h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+ e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+ e.markLongShardDirty(h0)
+ h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+ e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+ e.markShortShardDirty(h1)
+ index0 += 2
+ off += 2
+ }
+
+ cv = load6432(src, s)
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShortShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset the encoder state. This encoder does not support dictionaries; a non-nil dict will panic.
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("betterFastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff; i < end; i += 4 {
+ const hashLog = betterShortTableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
+ nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
+ nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+ nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ e.dictTable[nextHash3] = tableEntry{
+ val: uint32(cv >> 24),
+ offset: i + 3,
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Init or copy dict long table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]prevEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: e.maxMatchOff,
+ prev: e.dictLongTable[h].offset,
+ }
+
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ off := 8 // First to read
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[off]) << 56)
+ h := hashLen(cv, betterLongTableBits, betterLongLen)
+ e.dictLongTable[h] = prevEntry{
+ offset: i,
+ prev: e.dictLongTable[h].offset,
+ }
+ off++
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ // Reset table to initial state
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterShortTableShardCnt
+ const shardSize = betterShortTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.shortTableShardDirty {
+ e.shortTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.shortTableShardDirty {
+ if !e.shortTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.shortTableShardDirty[i] = false
+ }
+ }
+ }
+ {
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.shortTableShardDirty {
+ if e.shortTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+ const shardCnt = betterLongTableShardCnt
+ const shardSize = betterLongTableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ } else {
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+ e.longTableShardDirty[i] = false
+ }
+ }
+ }
+ e.cur = e.maxMatchOff
+ e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+ e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
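+
The dictionary variant's Reset above copies back only the table shards that were written to since the last reset, falling back to a full copy when more than two thirds of the shards are dirty. The sketch below is a simplified, self-contained version of that bookkeeping; the types and sizes are illustrative and not the package's:

package main

import "fmt"

const (
	shardCnt  = 8
	shardSize = 4
)

type shardedTable struct {
	table    [shardCnt * shardSize]int32
	snapshot [shardCnt * shardSize]int32 // state restored on reset (the "dict" copy)
	dirty    [shardCnt]bool
	allDirty bool
}

func (t *shardedTable) set(i int, v int32) {
	t.table[i] = v
	t.dirty[i/shardSize] = true
}

func (t *shardedTable) reset() {
	dirtyShards := 0
	for _, d := range t.dirty {
		if d {
			dirtyShards++
		}
	}
	if t.allDirty || dirtyShards > shardCnt*4/6 {
		// Mostly dirty: one big copy is cheaper.
		copy(t.table[:], t.snapshot[:])
	} else {
		// Otherwise copy back only the shards that changed.
		for i, d := range t.dirty {
			if d {
				copy(t.table[i*shardSize:(i+1)*shardSize], t.snapshot[i*shardSize:(i+1)*shardSize])
			}
		}
	}
	t.dirty = [shardCnt]bool{}
	t.allDirty = false
}

func main() {
	var t shardedTable
	t.set(5, 42)
	t.reset()
	fmt.Println(t.table[5]) // back to the snapshot value (0)
}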
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 000000000..a154c18f7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1123 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+ dFastLongTableBits = 17 // Bits used in the long match table
+ dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+ dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastLongLen = 8 // Bytes used for table hash
+
+ dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+ dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
+
+ dFastShortTableBits = tableBits // Bits used in the short match table
+ dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+ dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ dFastShortLen = 5 // Bytes used for table hash
+
+)
+
+type doubleFastEncoder struct {
+ fastEncoder
+ longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+ fastEncoderDict
+ longTable [dFastLongTableSize]tableEntry
+ dictLongTable []tableEntry
+ longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
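+
When no candidate matches, the search loops above advance by stepSize plus a term that grows with the distance since the last emitted literal, shifted by kSearchStrength-1, so long incompressible runs are skipped progressively faster. A standalone sketch of that step calculation using the constants from this file:

package main

import "fmt"

func main() {
	const stepSize = 1
	const kSearchStrength = 8 // 8 in the dfast encoder, 9 in the "better" encoder
	nextEmit := int32(0)

	for _, s := range []int32{16, 256, 1024, 8192} {
		step := stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
		fmt.Printf("s=%d -> advance %d bytes\n", s, step)
	}
}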
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ if e.cur >= e.bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ for {
+
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+
+ if len(blk.sequences) > 2 {
+ if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ // Extend the 4-byte match as long as possible.
+ //l := e.matchlen(s+4, t+4, src) + 4
+ l := int32(matchLen(src[s+4:], src[t+4:])) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
+ e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
+ e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
+
+ cv = load6432(src, s)
+
+ if len(blk.sequences) <= 2 {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ //l := 4 + e.matchlen(s+4, o2+4, src)
+ l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.table[nextHashS] = entry
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < e.bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ // Input margin is the number of bytes we read (8)
+ // and the maximum we will read ahead (2)
+ inputMargin = 8 + 2
+ minNonLiteralBlockSize = 16
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.longTable[:] {
+ e.longTable[i] = tableEntry{}
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+							println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugMatches {
+ println("long match")
+ }
+ break
+ }
+
+ // Check if we have a short match.
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+ // found a regular match
+ // See if we can find a long match at s+1
+ const checkAt = 1
+ cv := load6432(src, s+checkAt)
+ nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+ candidateL = e.longTable[nextHashL]
+ coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+ // We can store it, since we have at least a 4 byte match.
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
+ e.markLongShardDirty(nextHashL)
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur
+ s += checkAt
+ if debugMatches {
+ println("long match (after short)")
+ }
+ break
+ }
+
+ t = candidateS.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ if debugMatches {
+ println("short match")
+ }
+ break
+ }
+
+ // No match found, move forward in input.
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+
+ // A 4-byte match has been found. Update recent offsets.
+ // We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
+ e.longTable[longHash1] = te0
+ e.longTable[longHash2] = te1
+ e.markLongShardDirty(longHash1)
+ e.markLongShardDirty(longHash2)
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+ hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
+ e.table[hashVal1] = te0
+ e.markShardDirty(hashVal1)
+ e.table[hashVal2] = te1
+ e.markShardDirty(hashVal2)
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // If we encoded more than 64K mark all dirty.
+ if len(src) > 64<<10 {
+ e.markAllShardsDirty()
+ }
+}
+
+// Reset will reset the encoder; providing a dictionary is not supported and will panic.
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d != nil {
+ panic("doubleFastEncoder: Reset with dict not supported")
+ }
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ allDirty := e.allDirty
+ e.fastEncoderDict.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ allDirty = true
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+
+ dirtyShardCnt := 0
+ if !allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ return
+ }
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
+ e.longTableShardDirty[i] = false
+ }
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
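
For context on the sequences emitted above: seq.offset does not hold the raw match distance. A repeat of the most recent offset (always emitted with litLen == 0 in these paths) is stored as the code 1, while a fresh match stores its distance plus 3 (seq.offset = uint32(s-t) + 3). Below is a minimal sketch of just those two cases; it is not part of the vendored sources and the helper name is illustrative only.

package main

import "fmt"

// sketchOffsetCode mirrors the offset convention used by the encoders above:
// a repeat of the most recent offset (emitted with zero literals) is stored as 1,
// while a fresh match stores its distance plus 3.
func sketchOffsetCode(distance int32, repeat bool) uint32 {
	if repeat {
		return 1
	}
	return uint32(distance) + 3
}

func main() {
	fmt.Println(sketchOffsetCode(0, true))     // repeat of the most recent offset -> 1
	fmt.Println(sketchOffsetCode(1234, false)) // fresh match at distance 1234 -> 1237
}
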
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 000000000..f45a3da7d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,891 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+)
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableFastHashLen = 6
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics the functionality in zstd_fast.c.
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+		// We will not use repeat offsets across blocks.
+		// This is enforced by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ // Store this, since we have it.
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugEncoder {
+ if len(src) > maxCompressedBlockSize {
+ panic("src too big")
+ }
+ }
+
+ // Protect against e.cur wraparound.
+ if e.cur >= e.bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+		// We will not use repeat offsets across blocks.
+		// This is enforced by not using them for the first 3 matches.
+
+ for {
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && t < 0 {
+ panic(fmt.Sprintf("t (%d) < 0 ", t))
+ }
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ // Store this, since we have it.
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+	// We do not store history, so we must offset e.cur to avoid false matches for the next user.
+ if e.cur < e.bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if e.allDirty || len(src) > 32<<10 {
+ e.fastEncoder.Encode(blk, src)
+ e.allDirty = true
+ return
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [tableSize]tableEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+		// When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+		// We will not use repeat offsets across blocks.
+		// This is enforced by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShardDirty(nextHash)
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+ e.markShardDirty(nextHash2)
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ sMin := s - e.maxMatchOff
+ if sMin < 0 {
+ sMin = 0
+ }
+ for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + 2
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ coffset0 := s - (candidate.offset - e.cur)
+ coffset1 := s - (candidate2.offset - e.cur) + 1
+ if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
+ // found a regular match
+ t = candidate.offset - e.cur
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ break
+ }
+
+ if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
+ // found a regular match
+ t = candidate2.offset - e.cur
+ s++
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+ if debugAsserts && s-t > e.maxMatchOff {
+ panic("s - t >e.maxMatchOff")
+ }
+ if debugAsserts && t < 0 {
+ panic("t<0")
+ }
+ break
+ }
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ // A 4-byte match has been found. We'll later see if more than 4 bytes.
+ offset2 = offset1
+ offset1 = s - t
+
+ if debugAsserts && s <= t {
+ panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+ }
+
+ if debugAsserts && canRepeat && int(offset1) > len(src) {
+ panic("invalid offset")
+ }
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlen(s+4, t+4, src) + 4
+
+ // Extend backwards
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
+ s--
+ t--
+ l++
+ }
+
+ // Write our sequence.
+ var seq seq
+ seq.litLen = uint32(s - nextEmit)
+ seq.matchLen = uint32(l - zstdMinMatch)
+ if seq.litLen > 0 {
+ blk.literals = append(blk.literals, src[nextEmit:s]...)
+ }
+ // Don't use repeat offsets
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+
+ // Check offset 2
+ if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
+			// We have at least a 4 byte match.
+			// No need to check backwards. We come straight from a match.
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ // Store this, since we have it.
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShardDirty(nextHash)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset the encoder; providing a dictionary is not supported and will panic.
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("fastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset the encoder and set a dictionary if not nil.
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ if true {
+ end := e.maxMatchOff + int32(len(d.content)) - 8
+ for i := e.maxMatchOff; i < end; i += 2 {
+ const hashLog = tableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6
+ nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ e.cur = e.maxMatchOff
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.tableShardDirty {
+ if e.tableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ const shardCnt = tableShardCnt
+ const shardSize = tableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
+ for i := range e.tableShardDirty {
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+ return
+ }
+ for i := range e.tableShardDirty {
+ if !e.tableShardDirty[i] {
+ continue
+ }
+
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+ e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+ e.tableShardDirty[entryNum/tableShardSize] = true
+}
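
The main search loops above advance by stepSize + ((s - nextEmit) >> (kSearchStrength - 1)), so the encoder skips further ahead the longer it has gone without finding a match. The following small, self-contained sketch only illustrates that heuristic; the constants mirror fastEncoder.Encode and the program is not part of the vendored sources.

package main

import "fmt"

func main() {
	const stepSize = 2        // bytes skipped per iteration, as in fastEncoder.Encode
	const kSearchStrength = 6 // controls how quickly the skip distance grows
	// Print the per-iteration advance for growing gaps since the last emit.
	for _, gap := range []int32{0, 32, 64, 128, 512, 2048} {
		step := int32(stepSize) + (gap >> (kSearchStrength - 1))
		fmt.Printf("s-nextEmit=%4d -> advance %d bytes\n", gap, step)
	}
}
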
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 000000000..72af7ef0f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,619 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math"
+ rdebug "runtime/debug"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used either for compressing a stream via the
+// io.WriteCloser interface it supports, or for multiple independent
+// tasks via the EncodeAll function.
+// For smaller inputs, EncodeAll is usually the better choice.
+// Use NewWriter to create a new instance.
+type Encoder struct {
+ o encoderOptions
+ encoders chan encoder
+ state encoderState
+ init sync.Once
+}
+
+type encoder interface {
+ Encode(blk *blockEnc, src []byte)
+ EncodeNoHist(blk *blockEnc, src []byte)
+ Block() *blockEnc
+ CRC() *xxhash.Digest
+ AppendCRC([]byte) []byte
+ WindowSize(size int64) int32
+ UseBlock(*blockEnc)
+ Reset(d *dict, singleBlock bool)
+}
+
+type encoderState struct {
+ w io.Writer
+ filling []byte
+ current []byte
+ previous []byte
+ encoder encoder
+ writing *blockEnc
+ err error
+ writeErr error
+ nWritten int64
+ nInput int64
+ frameContentSize int64
+ headerWritten bool
+ eofWritten bool
+ fullFrameWritten bool
+
+ // This waitgroup indicates an encode is running.
+ wg sync.WaitGroup
+ // This waitgroup indicates we have a block encoding/writing.
+ wWg sync.WaitGroup
+}
+
+// NewWriter will create a new Zstandard encoder.
+// If the encoder will only be used for encoding blocks, a nil writer can be used.
+func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+ initPredefined()
+ var e Encoder
+ e.o.setDefault()
+ for _, o := range opts {
+ err := o(&e.o)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if w != nil {
+ e.Reset(w)
+ }
+ return &e, nil
+}
+
+func (e *Encoder) initialize() {
+ if e.o.concurrent == 0 {
+ e.o.setDefault()
+ }
+ e.encoders = make(chan encoder, e.o.concurrent)
+ for i := 0; i < e.o.concurrent; i++ {
+ enc := e.o.encoder()
+ e.encoders <- enc
+ }
+}
+
+// Reset will re-initialize the writer and new writes will encode to the supplied writer
+// as a new, independent stream.
+func (e *Encoder) Reset(w io.Writer) {
+ s := &e.state
+ s.wg.Wait()
+ s.wWg.Wait()
+ if cap(s.filling) == 0 {
+ s.filling = make([]byte, 0, e.o.blockSize)
+ }
+ if e.o.concurrent > 1 {
+ if cap(s.current) == 0 {
+ s.current = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.previous) == 0 {
+ s.previous = make([]byte, 0, e.o.blockSize)
+ }
+ s.current = s.current[:0]
+ s.previous = s.previous[:0]
+ if s.writing == nil {
+ s.writing = &blockEnc{lowMem: e.o.lowMem}
+ s.writing.init()
+ }
+ s.writing.initNewEncode()
+ }
+ if s.encoder == nil {
+ s.encoder = e.o.encoder()
+ }
+ s.filling = s.filling[:0]
+ s.encoder.Reset(e.o.dict, false)
+ s.headerWritten = false
+ s.eofWritten = false
+ s.fullFrameWritten = false
+ s.w = w
+ s.err = nil
+ s.nWritten = 0
+ s.nInput = 0
+ s.writeErr = nil
+ s.frameContentSize = 0
+}
+
+// ResetContentSize will reset and set a content size for the next stream.
+// If the number of bytes written does not match the size given, an error will
+// be returned when calling Close().
+// This is removed when Reset is called.
+// Sizes <= 0 result in no content size being set.
+func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
+ e.Reset(w)
+ if size >= 0 {
+ e.state.frameContentSize = size
+ }
+}
+
+// Write data to the encoder.
+// Input data will be buffered and as the buffer fills up
+// content will be compressed and written to the output.
+// When done writing, use Close to flush the remaining output
+// and write CRC if requested.
+func (e *Encoder) Write(p []byte) (n int, err error) {
+ s := &e.state
+ for len(p) > 0 {
+ if len(p)+len(s.filling) < e.o.blockSize {
+ if e.o.crc {
+ _, _ = s.encoder.CRC().Write(p)
+ }
+ s.filling = append(s.filling, p...)
+ return n + len(p), nil
+ }
+ add := p
+ if len(p)+len(s.filling) > e.o.blockSize {
+ add = add[:e.o.blockSize-len(s.filling)]
+ }
+ if e.o.crc {
+ _, _ = s.encoder.CRC().Write(add)
+ }
+ s.filling = append(s.filling, add...)
+ p = p[len(add):]
+ n += len(add)
+ if len(s.filling) < e.o.blockSize {
+ return n, nil
+ }
+ err := e.nextBlock(false)
+ if err != nil {
+ return n, err
+ }
+ if debugAsserts && len(s.filling) > 0 {
+ panic(len(s.filling))
+ }
+ }
+ return n, nil
+}
+
+// nextBlock will synchronize and start compressing input in e.state.filling.
+// If an error has occurred during encoding it will be returned.
+func (e *Encoder) nextBlock(final bool) error {
+ s := &e.state
+ // Wait for current block.
+ s.wg.Wait()
+ if s.err != nil {
+ return s.err
+ }
+ if len(s.filling) > e.o.blockSize {
+ return fmt.Errorf("block > maxStoreBlockSize")
+ }
+ if !s.headerWritten {
+ // If we have a single block encode, do a sync compression.
+ if final && len(s.filling) == 0 && !e.o.fullZero {
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+ if final && len(s.filling) > 0 {
+ s.current = e.EncodeAll(s.filling, s.current[:0])
+ var n2 int
+ n2, s.err = s.w.Write(s.current)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ s.nInput += int64(len(s.filling))
+ s.current = s.current[:0]
+ s.filling = s.filling[:0]
+ s.headerWritten = true
+ s.fullFrameWritten = true
+ s.eofWritten = true
+ return nil
+ }
+
+ var tmp [maxHeaderSize]byte
+ fh := frameHeader{
+ ContentSize: uint64(s.frameContentSize),
+ WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)),
+ SingleSegment: false,
+ Checksum: e.o.crc,
+ DictID: e.o.dict.ID(),
+ }
+
+ dst := fh.appendTo(tmp[:0])
+ s.headerWritten = true
+ s.wWg.Wait()
+ var n2 int
+ n2, s.err = s.w.Write(dst)
+ if s.err != nil {
+ return s.err
+ }
+ s.nWritten += int64(n2)
+ }
+ if s.eofWritten {
+ // Ensure we only write it once.
+ final = false
+ }
+
+ if len(s.filling) == 0 {
+ // Final block, but no data.
+ if final {
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ blk.last = true
+ blk.encodeRaw(nil)
+ s.wWg.Wait()
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.eofWritten = true
+ }
+ return s.err
+ }
+
+ // SYNC:
+ if e.o.concurrent == 1 {
+ src := s.filling
+ s.nInput += int64(len(s.filling))
+ if debugEncoder {
+ println("Adding sync block,", len(src), "bytes, final:", final)
+ }
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+
+ s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.err != nil {
+ return s.err
+ }
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.filling = s.filling[:0]
+ return s.err
+ }
+
+ // Move blocks forward.
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
+ s.nInput += int64(len(s.current))
+ s.wg.Add(1)
+ go func(src []byte) {
+ if debugEncoder {
+ println("Adding block,", len(src), "bytes, final:", final)
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ s.err = fmt.Errorf("panic while encoding: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wg.Done()
+ }()
+ enc := s.encoder
+ blk := enc.Block()
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+ // Wait for pending writes.
+ s.wWg.Wait()
+ if s.writeErr != nil {
+ s.err = s.writeErr
+ return
+ }
+ // Transfer encoders from previous write block.
+ blk.swapEncoders(s.writing)
+ // Transfer recent offsets to next.
+ enc.UseBlock(s.writing)
+ s.writing = blk
+ s.wWg.Add(1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
+ rdebug.PrintStack()
+ }
+ s.wWg.Done()
+ }()
+ s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.writeErr != nil {
+ return
+ }
+ _, s.writeErr = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ }()
+ }(s.current)
+ return nil
+}
+
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
+//
+// The Copy function uses ReaderFrom if available.
+func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
+ if debugEncoder {
+ println("Using ReadFrom")
+ }
+
+ // Flush any current writes.
+ if len(e.state.filling) > 0 {
+ if err := e.nextBlock(false); err != nil {
+ return 0, err
+ }
+ }
+ e.state.filling = e.state.filling[:e.o.blockSize]
+ src := e.state.filling
+ for {
+ n2, err := r.Read(src)
+ if e.o.crc {
+ _, _ = e.state.encoder.CRC().Write(src[:n2])
+ }
+ // src is now the unfilled part...
+ src = src[n2:]
+ n += int64(n2)
+ switch err {
+ case io.EOF:
+ e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
+ if debugEncoder {
+ println("ReadFrom: got EOF final block:", len(e.state.filling))
+ }
+ return n, nil
+ case nil:
+ default:
+ if debugEncoder {
+ println("ReadFrom: got error:", err)
+ }
+ e.state.err = err
+ return n, err
+ }
+ if len(src) > 0 {
+ if debugEncoder {
+ println("ReadFrom: got space left in source:", len(src))
+ }
+ continue
+ }
+ err = e.nextBlock(false)
+ if err != nil {
+ return n, err
+ }
+ e.state.filling = e.state.filling[:e.o.blockSize]
+ src = e.state.filling
+ }
+}
+
+// Flush will send the currently written data to output
+// and block until everything has been written.
+// This should only be used on rare occasions where pushing the currently queued data is critical.
+func (e *Encoder) Flush() error {
+ s := &e.state
+ if len(s.filling) > 0 {
+ err := e.nextBlock(false)
+ if err != nil {
+ return err
+ }
+ }
+ s.wg.Wait()
+ s.wWg.Wait()
+ if s.err != nil {
+ return s.err
+ }
+ return s.writeErr
+}
+
+// Close will flush the final output and close the stream.
+// The function will block until everything has been written.
+// The Encoder can still be re-used after calling this.
+func (e *Encoder) Close() error {
+ s := &e.state
+ if s.encoder == nil {
+ return nil
+ }
+ err := e.nextBlock(true)
+ if err != nil {
+ return err
+ }
+ if s.frameContentSize > 0 {
+ if s.nInput != s.frameContentSize {
+			return fmt.Errorf("frame content size %d given, but %d bytes were written", s.frameContentSize, s.nInput)
+ }
+ }
+ if e.state.fullFrameWritten {
+ return s.err
+ }
+ s.wg.Wait()
+ s.wWg.Wait()
+
+ if s.err != nil {
+ return s.err
+ }
+ if s.writeErr != nil {
+ return s.writeErr
+ }
+
+ // Write CRC
+ if e.o.crc && s.err == nil {
+ // heap alloc.
+ var tmp [4]byte
+ _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
+ s.nWritten += 4
+ }
+
+ // Add padding with content from crypto/rand.Reader
+ if s.err == nil && e.o.pad > 0 {
+ add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
+ frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
+ if err != nil {
+ return err
+ }
+ _, s.err = s.w.Write(frame)
+ }
+ return s.err
+}
+
+// EncodeAll will encode all input in src and append it to dst.
+// This function can be called concurrently, but each call will only run on a single goroutine.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
+// Encoded blocks can be concatenated and the result will be the combined input stream.
+// Data compressed with EncodeAll can be decoded with the Decoder,
+// using either a stream or DecodeAll.
+func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ if len(src) == 0 {
+ if e.o.fullZero {
+ // Add frame header.
+ fh := frameHeader{
+ ContentSize: 0,
+ WindowSize: MinWindowSize,
+ SingleSegment: true,
+ // Adding a checksum would be a waste of space.
+ Checksum: false,
+ DictID: 0,
+ }
+ dst = fh.appendTo(dst)
+
+ // Write raw block as last one only.
+ var blk blockHeader
+ blk.setSize(0)
+ blk.setType(blockTypeRaw)
+ blk.setLast(true)
+ dst = blk.appendTo(dst)
+ }
+ return dst
+ }
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ // Release encoder reference to last block.
+ // If a non-single block is needed the encoder will reset again.
+ e.encoders <- enc
+ }()
+ // Use single segments when above minimum window and below window size.
+ single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
+ if e.o.single != nil {
+ single = *e.o.single
+ }
+ fh := frameHeader{
+ ContentSize: uint64(len(src)),
+ WindowSize: uint32(enc.WindowSize(int64(len(src)))),
+ SingleSegment: single,
+ Checksum: e.o.crc,
+ DictID: e.o.dict.ID(),
+ }
+
+ // If less than 1MB, allocate a buffer up front.
+ if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
+ dst = make([]byte, 0, len(src))
+ }
+ dst = fh.appendTo(dst)
+
+ // If we can do everything in one block, prefer that.
+ if len(src) <= e.o.blockSize {
+ enc.Reset(e.o.dict, true)
+ // Slightly faster with no history and everything in one block.
+ if e.o.crc {
+ _, _ = enc.CRC().Write(src)
+ }
+ blk := enc.Block()
+ blk.last = true
+ if e.o.dict == nil {
+ enc.EncodeNoHist(blk, src)
+ } else {
+ enc.Encode(blk, src)
+ }
+
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ oldout := blk.output
+ // Output directly to dst
+ blk.output = dst
+
+ err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
+ panic(err)
+ }
+ dst = blk.output
+ blk.output = oldout
+ } else {
+ enc.Reset(e.o.dict, false)
+ blk := enc.Block()
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
+ panic(err)
+ }
+ dst = append(dst, blk.output...)
+ blk.reset(nil)
+ }
+ }
+ if e.o.crc {
+ dst = enc.AppendCRC(dst)
+ }
+ // Add padding with content from crypto/rand.Reader
+ if e.o.pad > 0 {
+ add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+ var err error
+ dst, err = skippableFrame(dst, add, rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return dst
+}
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+ frameHeader := 4 + 2 // magic + frame header & window descriptor
+ if e.o.dict != nil {
+ frameHeader += 4
+ }
+ // Frame content size:
+ if size < 256 {
+ frameHeader++
+ } else if size < 65536+256 {
+ frameHeader += 2
+ } else if size < math.MaxInt32 {
+ frameHeader += 4
+ } else {
+ frameHeader += 8
+ }
+ // Final crc
+ if e.o.crc {
+ frameHeader += 4
+ }
+
+ // Max overhead is 3 bytes/block.
+ // There cannot be 0 blocks.
+ blocks := (size + e.o.blockSize) / e.o.blockSize
+
+ // Combine, add padding.
+ maxSz := frameHeader + 3*blocks + size
+ if e.o.pad > 1 {
+ maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+ }
+ return maxSz
+}
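
The Encoder above supports two modes: streaming via NewWriter/Write/Close (io.Copy will use its ReadFrom), and one-shot framing via EncodeAll. The following is a minimal usage sketch relying only on the exported API defined in this file; the choice of stdin/stdout and the sample payload are arbitrary.

package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Streaming: NewWriter returns an io.WriteCloser; io.Copy will use the
	// encoder's ReadFrom, and Close flushes the final block and CRC.
	enc, err := zstd.NewWriter(os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// One-shot: EncodeAll appends a complete frame to dst.
	// A nil writer is fine when the encoder is only used this way.
	oneShot, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	frame := oneShot.EncodeAll([]byte("hello zstd"), nil)
	log.Printf("one-shot frame: %d bytes", len(frame))
}
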
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 000000000..20671dcb9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,339 @@
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "runtime"
+ "strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains accumulated state of multiple options.
+type encoderOptions struct {
+ concurrent int
+ level EncoderLevel
+ single *bool
+ pad int
+ blockSize int
+ windowSize int
+ crc bool
+ fullZero bool
+ noEntropy bool
+ allLitEntropy bool
+ customWindow bool
+ customALEntropy bool
+ customBlockSize bool
+ lowMem bool
+ dict *dict
+}
+
+func (o *encoderOptions) setDefault() {
+ *o = encoderOptions{
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: maxCompressedBlockSize,
+ windowSize: 8 << 20,
+ level: SpeedDefault,
+ allLitEntropy: false,
+ lowMem: false,
+ }
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+ switch o.level {
+ case SpeedFastest:
+ if o.dict != nil {
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+
+ case SpeedDefault:
+ if o.dict != nil {
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
+ }
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ case SpeedBetterCompression:
+ if o.dict != nil {
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ case SpeedBestCompression:
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ }
+ panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+ return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
+// By default this will be set to GOMAXPROCS.
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level, and is at most 8MB.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ o.customBlockSize = true
+ }
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ n = 0
+ }
+ if n > 1<<30 {
+			return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+	// If you use this, be aware that CPU usage may increase in future versions.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
+ // speedLast should be kept as the last actual compression option.
+	// It is not for external usage, but is used to keep track of the valid options.
+ speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+ for l := speedNotSet + 1; l < speedLast; l++ {
+ if strings.EqualFold(s, l.String()) {
+ return true, l
+ }
+ }
+ return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that closest matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+ switch {
+ case level < 3:
+ return SpeedFastest
+ case level >= 3 && level < 6:
+ return SpeedDefault
+ case level >= 6 && level < 10:
+ return SpeedBetterCompression
+ default:
+ return SpeedBestCompression
+ }
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+ switch e {
+ case SpeedFastest:
+ return "fastest"
+ case SpeedDefault:
+ return "default"
+ case SpeedBetterCompression:
+ return "better"
+ case SpeedBestCompression:
+ return "best"
+ default:
+ return "invalid"
+ }
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case l <= speedNotSet || l >= speedLast:
+ return fmt.Errorf("unknown encoder level")
+ }
+ o.level = l
+ if !o.customWindow {
+ switch o.level {
+ case SpeedFastest:
+ o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
+ case SpeedDefault:
+ o.windowSize = 8 << 20
+ case SpeedBetterCompression:
+ o.windowSize = 8 << 20
+ case SpeedBestCompression:
+ o.windowSize = 8 << 20
+ }
+ }
+ if !o.customALEntropy {
+ o.allLitEntropy = l > SpeedDefault
+ }
+
+ return nil
+ }
+}
+
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.fullZero = b
+ return nil
+ }
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but in cases with no matches
+// and a skewed character distribution, compression is lost.
+// Default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.customALEntropy = true
+ o.allLitEntropy = b
+ return nil
+ }
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if the content has matches but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.noEntropy = b
+ return nil
+ }
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment of a size equal to or larger than the size of your content.
+// To protect the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary means of reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
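+
+// Minimal sketch of registering a raw-content dictionary (the id 1 and the seed
+// bytes are arbitrary illustrative values; NewWriter and EncodeAll are existing
+// package APIs):
+//
+//	seed := []byte("shared prefix seen in many payloads")
+//	enc, err := NewWriter(nil, WithEncoderDictRaw(1, seed))
+//	if err == nil {
+//		compressed := enc.EncodeAll([]byte("payload"), nil)
+//		_ = compressed
+//	}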
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 000000000..53e160f7e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,413 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "io"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc *xxhash.Digest
+
+ WindowSize uint64
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+
+ DictionaryID uint32
+ HasCheckSum bool
+ SingleSegment bool
+}
+
+const (
+ // MinWindowSize is the minimum Window Size, which is 1 KB.
+ MinWindowSize = 1 << 10
+
+ // MaxWindowSize is the maximum encoder window size
+ // and the default decoder maximum window size.
+ MaxWindowSize = 1 << 29
+)
+
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ if o.maxWindowSize > o.maxDecodedSize {
+ o.maxWindowSize = o.maxDecodedSize
+ }
+ d := frameDec{
+ o: o,
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+ d.HasCheckSum = false
+ d.WindowSize = 0
+ var signature [4]byte
+ for {
+ var err error
+ // Check if we can read more...
+ b, err := br.readSmall(1)
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return io.EOF
+ case nil:
+ signature[0] = b[0]
+ default:
+ return err
+ }
+ // Read the rest, don't allow io.ErrUnexpectedEOF
+ b, err = br.readSmall(3)
+ switch err {
+ case io.EOF:
+ return io.EOF
+ case nil:
+ copy(signature[1:], b)
+ default:
+ return err
+ }
+
+ if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
+ if debugDecoder {
+ println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
+ }
+ // Break if not skippable frame.
+ break
+ }
+ // Read size to skip
+ b, err = br.readSmall(4)
+ if err != nil {
+ if debugDecoder {
+ println("Reading Frame Size", err)
+ }
+ return err
+ }
+ n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ println("Skipping frame with", n, "bytes.")
+ err = br.skipN(int64(n))
+ if err != nil {
+ if debugDecoder {
+ println("Reading discarded frame", err)
+ }
+ return err
+ }
+ }
+ if string(signature[:]) != frameMagic {
+ if debugDecoder {
+ println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
+ }
+ return ErrMagicMismatch
+ }
+
+ // Read Frame_Header_Descriptor
+ fhd, err := br.readByte()
+ if err != nil {
+ if debugDecoder {
+ println("Reading Frame_Header_Descriptor", err)
+ }
+ return err
+ }
+ d.SingleSegment = fhd&(1<<5) != 0
+
+ if fhd&(1<<3) != 0 {
+ return errors.New("reserved bit set on frame header")
+ }
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+ d.WindowSize = 0
+ if !d.SingleSegment {
+ wd, err := br.readByte()
+ if err != nil {
+ if debugDecoder {
+ println("Reading Window_Descriptor", err)
+ }
+ return err
+ }
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ windowLog := 10 + (wd >> 3)
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * uint64(wd&0x7)
+ d.WindowSize = windowBase + windowAdd
+ }
+
+ // Read Dictionary_ID
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+ d.DictionaryID = 0
+ if size := fhd & 3; size != 0 {
+ if size == 3 {
+ size = 4
+ }
+
+ b, err := br.readSmall(int(size))
+ if err != nil {
+ println("Reading Dictionary_ID", err)
+ return err
+ }
+ var id uint32
+ switch len(b) {
+ case 1:
+ id = uint32(b[0])
+ case 2:
+ id = uint32(b[0]) | (uint32(b[1]) << 8)
+ case 4:
+ id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ }
+ if debugDecoder {
+ println("Dict size", size, "ID:", id)
+ }
+ d.DictionaryID = id
+ }
+
+ // Read Frame_Content_Size
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+ var fcsSize int
+ v := fhd >> 6
+ switch v {
+ case 0:
+ if d.SingleSegment {
+ fcsSize = 1
+ }
+ default:
+ fcsSize = 1 << v
+ }
+ d.FrameContentSize = fcsUnknown
+ if fcsSize > 0 {
+ b, err := br.readSmall(fcsSize)
+ if err != nil {
+ println("Reading Frame content", err)
+ return err
+ }
+ switch len(b) {
+ case 1:
+ d.FrameContentSize = uint64(b[0])
+ case 2:
+ // When FCS_Field_Size is 2, the offset of 256 is added.
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
+ case 4:
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
+ case 8:
+ d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
+ d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
+ }
+ if debugDecoder {
+ println("Read FCS:", d.FrameContentSize)
+ }
+ }
+
+ // Move this to shared.
+ d.HasCheckSum = fhd&(1<<2) != 0
+ if d.HasCheckSum {
+ if d.crc == nil {
+ d.crc = xxhash.New()
+ }
+ d.crc.Reset()
+ }
+
+ if d.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrWindowSizeExceeded
+ }
+
+ if d.WindowSize == 0 && d.SingleSegment {
+ // We may not need window in this case.
+ d.WindowSize = d.FrameContentSize
+ if d.WindowSize < MinWindowSize {
+ d.WindowSize = MinWindowSize
+ }
+ if d.WindowSize > d.o.maxDecodedSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrDecoderSizeExceeded
+ }
+ }
+
+ // The minimum Window_Size is 1 KB.
+ if d.WindowSize < MinWindowSize {
+ if debugDecoder {
+ println("got window size: ", d.WindowSize)
+ }
+ return ErrWindowSizeTooSmall
+ }
+ d.history.windowSize = int(d.WindowSize)
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+ // Alloc 2x window size if not low-mem, or window size below 2MB.
+ d.history.allocFrameBuffer = d.history.windowSize * 2
+ } else {
+ if d.o.lowMem {
+ // Alloc with 1MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+ } else {
+ // Alloc with 2MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+ }
+ }
+
+ if debugDecoder {
+ println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+ }
+
+ // history contains input - maybe we do something
+ d.rawInput = br
+ return nil
+}
+
+// next will start decoding the next block from stream.
+func (d *frameDec) next(block *blockDec) error {
+ if debugDecoder {
+ println("decoding new block")
+ }
+ err := block.reset(d.rawInput, d.WindowSize)
+ if err != nil {
+ println("block error:", err)
+ // Signal the frame decoder we have a problem.
+ block.sendErr(err)
+ return err
+ }
+ return nil
+}
+
+// checkCRC will check the checksum, assuming the frame has one.
+// It will return ErrCRCMismatch if the CRC check fails, otherwise nil.
+func (d *frameDec) checkCRC() error {
+ // We can overwrite upper tmp now
+ buf, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ return err
+ }
+
+ want := binary.LittleEndian.Uint32(buf[:4])
+ got := uint32(d.crc.Sum64())
+
+ if got != want {
+ if debugDecoder {
+ printf("CRC check failed: got %08x, want %08x\n", got, want)
+ }
+ return ErrCRCMismatch
+ }
+ if debugDecoder {
+ printf("CRC ok %08x\n", got)
+ }
+ return nil
+}
+
+// consumeCRC skips over the checksum, assuming the frame has one.
+func (d *frameDec) consumeCRC() error {
+ _, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ }
+ return err
+}
+
+// runDecoder will run the decoder for the remainder of the frame.
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
+ saved := d.history.b
+
+ // We use the history for output to avoid copying it.
+ d.history.b = dst
+ d.history.ignoreBuffer = len(dst)
+ // Store input length, so we only check new data.
+ crcStart := len(dst)
+ d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
+ if d.FrameContentSize != fcsUnknown {
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
+ if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ // Alloc for output
+ dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
+ var err error
+ for {
+ err = dec.reset(d.rawInput, d.WindowSize)
+ if err != nil {
+ break
+ }
+ if debugDecoder {
+ println("next block:", dec)
+ }
+ err = dec.decodeBuf(&d.history)
+ if err != nil {
+ break
+ }
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
+ err = ErrFrameSizeExceeded
+ break
+ }
+ if dec.Last {
+ break
+ }
+ if debugDecoder {
+ println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+ }
+ }
+ dst = d.history.b
+ if err == nil {
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ err = ErrFrameSizeMismatch
+ } else if d.HasCheckSum {
+ if d.o.ignoreChecksum {
+ err = d.consumeCRC()
+ } else {
+ d.crc.Write(dst[crcStart:])
+ err = d.checkCRC()
+ }
+ }
+ }
+ d.history.b = saved
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
new file mode 100644
index 000000000..667ca0679
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -0,0 +1,137 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+)
+
+type frameHeader struct {
+ ContentSize uint64
+ WindowSize uint32
+ SingleSegment bool
+ Checksum bool
+ DictID uint32
+}
+
+const maxHeaderSize = 14
+
+func (f frameHeader) appendTo(dst []byte) []byte {
+ dst = append(dst, frameMagic...)
+ var fhd uint8
+ if f.Checksum {
+ fhd |= 1 << 2
+ }
+ if f.SingleSegment {
+ fhd |= 1 << 5
+ }
+
+ var dictIDContent []byte
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
+ var fcs uint8
+ if f.ContentSize >= 256 {
+ fcs++
+ }
+ if f.ContentSize >= 65536+256 {
+ fcs++
+ }
+ if f.ContentSize >= 0xffffffff {
+ fcs++
+ }
+
+ fhd |= fcs << 6
+
+ dst = append(dst, fhd)
+ if !f.SingleSegment {
+ const winLogMin = 10
+ windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+ dst = append(dst, uint8(windowLog))
+ }
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
+ }
+ switch fcs {
+ case 0:
+ if f.SingleSegment {
+ dst = append(dst, uint8(f.ContentSize))
+ }
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
+ case 1:
+ f.ContentSize -= 256
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+ case 2:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+ case 3:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+ uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+ default:
+ panic("invalid fcs")
+ }
+ return dst
+}
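+
+// Worked example (values chosen for illustration): a frameHeader with
+// WindowSize = 1<<20, ContentSize = 123, no checksum, no dictionary and
+// SingleSegment unset appends 6 bytes: the magic 28 b5 2f fd, the frame
+// header descriptor 0x00 (fcs code 0, no flags set) and the window
+// descriptor 0x50 (windowLog 20, mantissa 0). The content size is not
+// stored because it is below 256 and SingleSegment is not set.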
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to add so that written
+// becomes divisible by wantMultiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
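+
+// Worked examples (arguments chosen for illustration):
+// calcSkippableFrame(1000, 512): leftover 1000%512 = 488, 512-488 = 24 >= 8, so 24 is returned.
+// calcSkippableFrame(1022, 512): leftover 510, 512-510 = 2 < 8, so 2+512 = 514 is returned.
+// calcSkippableFrame(1024, 512): already divisible, so 0 is returned.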
+
+// skippableFrame will add a skippable frame with a total size of bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+ }
+ if int64(total) > math.MaxUint32 {
+ return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+ }
+ dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+ f := uint32(total - skippableFrameHeader)
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
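+
+// Worked example (sizes chosen for illustration): skippableFrame(dst, 16, r)
+// appends the 4-byte skippable magic 50 2a 4d 18, a 4-byte little-endian
+// length of 8 (16 minus the 8-byte header) and 8 bytes read from r, for a
+// total of 16 appended bytes.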
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 000000000..2f8860a72
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+)
+
+const (
+ tablelogAbsoluteMax = 9
+)
+
+const (
+ /*!MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+ maxMemoryUsage = tablelogAbsoluteMax + 2
+
+ maxTableLog = maxMemoryUsage - 2
+ maxTablesize = 1 << maxTableLog
+ maxTableMask = (1 << maxTableLog) - 1
+ minTablelog = 5
+ maxSymbolValue = 255
+)
+
+// fseDecoder provides temporary storage for compression and decompression.
+type fseDecoder struct {
+ dt [maxTablesize]decSymbol // Decompression table.
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ maxBits uint8 // Maximum number of additional bits
+
+ // used for table creation to avoid allocations.
+ stateTable [256]uint16
+ norm [maxSymbolValue + 1]int16
+ preDefined bool
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+ return (tableSize >> 1) + (tableSize >> 3) + 3
+}
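+
+// For example, tableStep(64) = (64>>1)+(64>>3)+3 = 32+8+3 = 43, which is odd and
+// therefore coprime to the power-of-two table size, so stepping by it visits
+// every table cell exactly once.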
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+ var (
+ charnum uint16
+ previous0 bool
+ )
+ if b.remain() < 4 {
+ return errors.New("input too small")
+ }
+ bitStream := b.Uint32NC()
+ nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+ if nbBits > tablelogAbsoluteMax {
+ println("Invalid tablelog:", nbBits)
+ return errors.New("tableLog too large")
+ }
+ bitStream >>= 4
+ bitCount := uint(4)
+
+ s.actualTableLog = uint8(nbBits)
+ remaining := int32((1 << nbBits) + 1)
+ threshold := int32(1 << nbBits)
+ gotTotal := int32(0)
+ nbBits++
+
+ for remaining > 1 && charnum <= maxSymbol {
+ if previous0 {
+ //println("prev0")
+ n0 := charnum
+ for (bitStream & 0xFFFF) == 0xFFFF {
+ //println("24 x 0")
+ n0 += 24
+ if r := b.remain(); r > 5 {
+ b.advance(2)
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
+ } else {
+ // end of bit stream
+ bitStream >>= 16
+ bitCount += 16
+ }
+ }
+ //printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+ for (bitStream & 3) == 3 {
+ n0 += 3
+ bitStream >>= 2
+ bitCount += 2
+ }
+ n0 += uint16(bitStream & 3)
+ bitCount += 2
+
+ if n0 > maxSymbolValue {
+ return errors.New("maxSymbolValue too small")
+ }
+ //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+ for charnum < n0 {
+ s.norm[uint8(charnum)] = 0
+ charnum++
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
+ } else {
+ bitStream >>= 2
+ }
+ }
+
+ max := (2*threshold - 1) - remaining
+ var count int32
+
+ if int32(bitStream)&(threshold-1) < max {
+ count = int32(bitStream) & (threshold - 1)
+ if debugAsserts && nbBits < 1 {
+ panic("nbBits underflow")
+ }
+ bitCount += nbBits - 1
+ } else {
+ count = int32(bitStream) & (2*threshold - 1)
+ if count >= threshold {
+ count -= max
+ }
+ bitCount += nbBits
+ }
+
+ // extra accuracy
+ count--
+ if count < 0 {
+ // -1 means +1
+ remaining += count
+ gotTotal -= count
+ } else {
+ remaining -= count
+ gotTotal += count
+ }
+ s.norm[charnum&0xff] = int16(count)
+ charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
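+
+// Worked example of the packing above: newDecSymbol(5, 2, 0x1234, 100) stores
+// nbBits=5 in bits 0-7, addBits=2 in bits 8-15, newState=0x1234 in bits 16-31
+// and baseline=100 in bits 32-63, so the accessors below return exactly those values.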
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) {
+ s.actualTableLog = 0
+ s.maxBits = symbol.addBits()
+ s.dt[0] = symbol
+}
+
+// transform will transform the decoder table into a table usable for
+// decoding without having to apply the transformation while decoding.
+// The state will contain the base value and the number of bits to read.
+func (s *fseDecoder) transform(t []baseOffset) error {
+ tableSize := uint16(1 << s.actualTableLog)
+ s.maxBits = 0
+ for i, v := range s.dt[:tableSize] {
+ add := v.addBits()
+ if int(add) >= len(t) {
+ return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
+ }
+ lu := t[add]
+ if lu.addBits > s.maxBits {
+ s.maxBits = lu.addBits
+ }
+ v.setExt(lu.addBits, lu.baseLine)
+ s.dt[i] = v
+ }
+ return nil
+}
+
+type fseState struct {
+ dt []decSymbol
+ state decSymbol
+}
+
+// Initialize and decodeAsync first state and symbol.
+func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
+ s.dt = dt
+ br.fill()
+ s.state = dt[br.getBits(tableLog)]
+}
+
+// final returns the current state symbol without decoding the next.
+func (s decSymbol) final() (int, uint8) {
+ return s.baselineInt(), s.addBits()
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
new file mode 100644
index 000000000..d04a829b0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -0,0 +1,65 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+)
+
+type buildDtableAsmContext struct {
+ // inputs
+ stateTable *uint16
+ norm *int16
+ dt *uint64
+
+ // outputs --- set by the procedure in the case of error;
+ // for interpretation please see the error handling part below
+ errParam1 uint64
+ errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+//
+//go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+ errorCorruptedNormalizedCounter = 1
+ errorNewStateTooBig = 2
+ errorNewStateNoBits = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ ctx := buildDtableAsmContext{
+ stateTable: &s.stateTable[0],
+ norm: &s.norm[0],
+ dt: (*uint64)(&s.dt[0]),
+ }
+ code := buildDtable_asm(s, &ctx)
+
+ if code != 0 {
+ switch code {
+ case errorCorruptedNormalizedCounter:
+ position := ctx.errParam1
+ return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+ case errorNewStateTooBig:
+ newState := decSymbol(ctx.errParam1)
+ size := ctx.errParam2
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+ case errorNewStateNoBits:
+ newState := decSymbol(ctx.errParam1)
+ oldState := decSymbol(ctx.errParam2)
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+ default:
+ return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
new file mode 100644
index 000000000..bcde39869
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -0,0 +1,126 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+ MOVQ ctx+8(FP), CX
+ MOVQ s+0(FP), DI
+
+ // Load values
+ MOVBQZX 4098(DI), DX
+ XORQ AX, AX
+ BTSQ DX, AX
+ MOVQ (CX), BX
+ MOVQ 16(CX), SI
+ LEAQ -1(AX), R8
+ MOVQ 8(CX), CX
+ MOVWQZX 4096(DI), DI
+
+ // End load values
+ // Init, lay down lowprob symbols
+ XORQ R9, R9
+ JMP init_main_loop_condition
+
+init_main_loop:
+ MOVWQSX (CX)(R9*2), R10
+ CMPW R10, $-1
+ JNE do_not_update_high_threshold
+ MOVB R9, 1(SI)(R8*8)
+ DECQ R8
+ MOVQ $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+ MOVW R10, (BX)(R9*2)
+ INCQ R9
+
+init_main_loop_condition:
+ CMPQ R9, DI
+ JL init_main_loop
+
+ // Spread symbols
+ // Calculate table step
+ MOVQ AX, R9
+ SHRQ $0x01, R9
+ MOVQ AX, R10
+ SHRQ $0x03, R10
+ LEAQ 3(R9)(R10*1), R9
+
+ // Fill add bits values
+ LEAQ -1(AX), R10
+ XORQ R11, R11
+ XORQ R12, R12
+ JMP spread_main_loop_condition
+
+spread_main_loop:
+ XORQ R13, R13
+ MOVWQSX (CX)(R12*2), R14
+ JMP spread_inner_loop_condition
+
+spread_inner_loop:
+ MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+ ADDQ R9, R11
+ ANDQ R10, R11
+ CMPQ R11, R8
+ JG adjust_position
+ INCQ R13
+
+spread_inner_loop_condition:
+ CMPQ R13, R14
+ JL spread_inner_loop
+ INCQ R12
+
+spread_main_loop_condition:
+ CMPQ R12, DI
+ JL spread_main_loop
+ TESTQ R11, R11
+ JZ spread_check_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R11, 24(AX)
+ MOVQ $+1, ret+16(FP)
+ RET
+
+spread_check_ok:
+ // Build Decoding table
+ XORQ DI, DI
+
+build_table_main_table:
+ MOVBQZX 1(SI)(DI*8), CX
+ MOVWQZX (BX)(CX*2), R8
+ LEAQ 1(R8), R9
+ MOVW R9, (BX)(CX*2)
+ MOVQ R8, R9
+ BSRQ R9, R9
+ MOVQ DX, CX
+ SUBQ R9, CX
+ SHLQ CL, R8
+ SUBQ AX, R8
+ MOVB CL, (SI)(DI*8)
+ MOVW R8, 2(SI)(DI*8)
+ CMPQ R8, AX
+ JLE build_table_check1_ok
+ MOVQ ctx+8(FP), CX
+ MOVQ R8, 24(CX)
+ MOVQ AX, 32(CX)
+ MOVQ $+2, ret+16(FP)
+ RET
+
+build_table_check1_ok:
+ TESTB CL, CL
+ JNZ build_table_check2_ok
+ CMPW R8, DI
+ JNE build_table_check2_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R8, 24(AX)
+ MOVQ DI, 32(AX)
+ MOVQ $+3, ret+16(FP)
+ RET
+
+build_table_check2_ok:
+ INCQ DI
+ CMPQ DI, AX
+ JL build_table_main_table
+ MOVQ $+0, ret+16(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 000000000..8adfebb02
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,73 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ v = 1
+ }
+ symbolNext[i] = uint16(v)
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ for {
+ // lowprob area
+ position = (position + step) & tableMask
+ if position <= highThreshold {
+ break
+ }
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 000000000..ab26326a8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,701 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+)
+
+const (
+ // For encoding we only support up to
+ maxEncTableLog = 8
+ maxEncTablesize = 1 << maxTableLog
+ maxEncTableMask = (1 << maxTableLog) - 1
+ minEncTablelog = 5
+ maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression and decompression.
+type fseEncoder struct {
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ ct cTable // Compression tables.
+ maxCount int // count of the most probable symbol
+ zeroBits bool // no bits has prob > 50%.
+ clearCount bool // clear count
+ useRLE bool // This encoder is for RLE
+ preDefined bool // This encoder is predefined.
+ reUsed bool // Set to know when the encoder has been reused.
+ rleVal uint8 // RLE Symbol
+ maxBits uint8 // Maximum output bits after transform.
+
+ // TODO: Technically zstd should be fine with 64 bytes.
+ count [256]uint32
+ norm [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+ tableSymbol []byte
+ stateTable []uint16
+ symbolTT []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+ deltaNbBits uint32
+ deltaFindState int16
+ outBits uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+ return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram allows populating the histogram and skipping that step during compression.
+// It also allows inspecting the histogram once compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+ return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+ s.maxCount = maxCount
+ s.symbolLen = uint16(maxSymbol) + 1
+ s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If the existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+ tableSize := 1 << s.actualTableLog
+ // get tableSymbol that is big enough.
+ if cap(s.ct.tableSymbol) < tableSize {
+ s.ct.tableSymbol = make([]byte, tableSize)
+ }
+ s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+ ctSize := tableSize
+ if cap(s.ct.stateTable) < ctSize {
+ s.ct.stateTable = make([]uint16, ctSize)
+ }
+ s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+ if cap(s.ct.symbolTT) < 256 {
+ s.ct.symbolTT = make([]symbolTransform, 256)
+ }
+ s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ var cumul [256]int16
+
+ s.allocCtable()
+ tableSymbol := s.ct.tableSymbol[:tableSize]
+ // symbol start positions
+ {
+ cumul[0] = 0
+ for ui, v := range s.norm[:s.symbolLen-1] {
+ u := byte(ui) // one less than reference
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = u
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ }
+ // Encode last symbol separately to avoid overflowing u
+ u := int(s.symbolLen - 1)
+ v := s.norm[s.symbolLen-1]
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = byte(u)
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ if uint32(cumul[s.symbolLen]) != tableSize {
+ return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+ }
+ cumul[s.symbolLen] = int16(tableSize) + 1
+ }
+ // Spread symbols
+ s.zeroBits = false
+ {
+ step := tableStep(tableSize)
+ tableMask := tableSize - 1
+ var position uint32
+ // if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1))
+ for ui, v := range s.norm[:s.symbolLen] {
+ symbol := byte(ui)
+ if v > largeLimit {
+ s.zeroBits = true
+ }
+ for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+ tableSymbol[position] = symbol
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ position = (position + step) & tableMask
+ } /* Low proba area */
+ }
+ }
+
+ // Check if we have gone through all positions
+ if position != 0 {
+ return errors.New("position!=0")
+ }
+ }
+
+ // Build table
+ table := s.ct.stateTable
+ {
+ tsi := int(tableSize)
+ for u, v := range tableSymbol {
+ // TableU16 : sorted by symbol order; gives next state value
+ table[cumul[v]] = uint16(tsi + u)
+ cumul[v]++
+ }
+ }
+
+ // Build Symbol Transformation Table
+ {
+ total := int16(0)
+ symbolTT := s.ct.symbolTT[:s.symbolLen]
+ tableLog := s.actualTableLog
+ tl := (uint32(tableLog) << 16) - (1 << tableLog)
+ for i, v := range s.norm[:s.symbolLen] {
+ switch v {
+ case 0:
+ case -1, 1:
+ symbolTT[i].deltaNbBits = tl
+ symbolTT[i].deltaFindState = total - 1
+ total++
+ default:
+ maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
+ minStatePlus := uint32(v) << maxBitsOut
+ symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
+ symbolTT[i].deltaFindState = total - v
+ total += v
+ }
+ }
+ if total != int16(tableSize) {
+ return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
+ }
+ }
+ return nil
+}
+
+var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
+
+func (s *fseEncoder) setRLE(val byte) {
+ s.allocCtable()
+ s.actualTableLog = 0
+ s.ct.stateTable = s.ct.stateTable[:1]
+ s.ct.symbolTT[val] = symbolTransform{
+ deltaFindState: 0,
+ deltaNbBits: 0,
+ }
+ if debugEncoder {
+ println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
+ }
+ s.rleVal = val
+ s.useRLE = true
+}
+
+// setBits will set output bits for the transform.
+// if nil is provided, the number of bits is equal to the index.
+func (s *fseEncoder) setBits(transform []byte) {
+ if s.reUsed || s.preDefined {
+ return
+ }
+ if s.useRLE {
+ if transform == nil {
+ s.ct.symbolTT[s.rleVal].outBits = s.rleVal
+ s.maxBits = s.rleVal
+ return
+ }
+ s.maxBits = transform[s.rleVal]
+ s.ct.symbolTT[s.rleVal].outBits = s.maxBits
+ return
+ }
+ if transform == nil {
+ for i := range s.ct.symbolTT[:s.symbolLen] {
+ s.ct.symbolTT[i].outBits = uint8(i)
+ }
+ s.maxBits = uint8(s.symbolLen - 1)
+ return
+ }
+ s.maxBits = 0
+ for i, v := range transform[:s.symbolLen] {
+ s.ct.symbolTT[i].outBits = v
+ if v > s.maxBits {
+ // We could assume bits always going up, but we play safe.
+ s.maxBits = v
+ }
+ }
+}
+
+// normalizeCount will normalize the count of the symbols so
+// the total is equal to the table size.
+// If successful, compression tables will also be made ready.
+func (s *fseEncoder) normalizeCount(length int) error {
+ if s.reUsed {
+ return nil
+ }
+ s.optimalTableLog(length)
+ var (
+ tableLog = s.actualTableLog
+ scale = 62 - uint64(tableLog)
+ step = (1 << 62) / uint64(length)
+ vStep = uint64(1) << (scale - 20)
+ stillToDistribute = int16(1 << tableLog)
+ largest int
+ largestP int16
+ lowThreshold = (uint32)(length >> tableLog)
+ )
+ if s.maxCount == length {
+ s.useRLE = true
+ return nil
+ }
+ s.useRLE = false
+ for i, cnt := range s.count[:s.symbolLen] {
+ // already handled
+ // if (count[s] == s.length) return 0; /* rle special case */
+
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ stillToDistribute--
+ } else {
+ proba := (int16)((uint64(cnt) * step) >> scale)
+ if proba < 8 {
+ restToBeat := vStep * uint64(rtbTable[proba])
+ v := uint64(cnt)*step - (uint64(proba) << scale)
+ if v > restToBeat {
+ proba++
+ }
+ }
+ if proba > largestP {
+ largestP = proba
+ largest = i
+ }
+ s.norm[i] = proba
+ stillToDistribute -= proba
+ }
+ }
+
+ if -stillToDistribute >= (s.norm[largest] >> 1) {
+ // corner case, need another normalization method
+ err := s.normalizeCount2(length)
+ if err != nil {
+ return err
+ }
+ if debugAsserts {
+ err = s.validateNorm()
+ if err != nil {
+ return err
+ }
+ }
+ return s.buildCTable()
+ }
+ s.norm[largest] += stillToDistribute
+ if debugAsserts {
+ err := s.validateNorm()
+ if err != nil {
+ return err
+ }
+ }
+ return s.buildCTable()
+}
+
+// Secondary normalization method.
+// To be used when primary method fails.
+func (s *fseEncoder) normalizeCount2(length int) error {
+ const notYetAssigned = -2
+ var (
+ distributed uint32
+ total = uint32(length)
+ tableLog = s.actualTableLog
+ lowThreshold = total >> tableLog
+ lowOne = (total * 3) >> (tableLog + 1)
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt == 0 {
+ s.norm[i] = 0
+ continue
+ }
+ if cnt <= lowThreshold {
+ s.norm[i] = -1
+ distributed++
+ total -= cnt
+ continue
+ }
+ if cnt <= lowOne {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ s.norm[i] = notYetAssigned
+ }
+ toDistribute := (1 << tableLog) - distributed
+
+ if (total / toDistribute) > lowOne {
+ // risk of rounding to zero
+ lowOne = (total * 3) / (toDistribute * 2)
+ for i, cnt := range s.count[:s.symbolLen] {
+ if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
+ s.norm[i] = 1
+ distributed++
+ total -= cnt
+ continue
+ }
+ }
+ toDistribute = (1 << tableLog) - distributed
+ }
+ if distributed == uint32(s.symbolLen)+1 {
+ // all values are pretty poor;
+ // probably incompressible data (should have already been detected);
+ // find max, then give all remaining points to max
+ var maxV int
+ var maxC uint32
+ for i, cnt := range s.count[:s.symbolLen] {
+ if cnt > maxC {
+ maxV = i
+ maxC = cnt
+ }
+ }
+ s.norm[maxV] += int16(toDistribute)
+ return nil
+ }
+
+ if total == 0 {
+ // all of the symbols were low enough for the lowOne or lowThreshold
+ for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
+ if s.norm[i] > 0 {
+ toDistribute--
+ s.norm[i]++
+ }
+ }
+ return nil
+ }
+
+ var (
+ vStepLog = 62 - uint64(tableLog)
+ mid = uint64((1 << (vStepLog - 1)) - 1)
+ rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
+ tmpTotal = mid
+ )
+ for i, cnt := range s.count[:s.symbolLen] {
+ if s.norm[i] == notYetAssigned {
+ var (
+ end = tmpTotal + uint64(cnt)*rStep
+ sStart = uint32(tmpTotal >> vStepLog)
+ sEnd = uint32(end >> vStepLog)
+ weight = sEnd - sStart
+ )
+ if weight < 1 {
+ return errors.New("weight < 1")
+ }
+ s.norm[i] = int16(weight)
+ tmpTotal = end
+ }
+ }
+ return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+ tableLog := uint8(maxEncTableLog)
+ minBitsSrc := highBit(uint32(length)) + 1
+ minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+ minBits := uint8(minBitsSymbols)
+ if minBitsSrc < minBitsSymbols {
+ minBits = uint8(minBitsSrc)
+ }
+
+ maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minEncTablelog {
+ tableLog = minEncTablelog
+ }
+ if tableLog > maxEncTableLog {
+ tableLog = maxEncTableLog
+ }
+ s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minEncTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ outP = len(out)
+ )
+ if cap(out) < outP+maxHeaderSize {
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
+ }
+ out = out[:outP+maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+ }
+ bitStream += uint32(count) << bitCount
+ bitCount += nbBits
+ if count < max {
+ bitCount--
+ }
+
+ previous0 = count == 1
+ if remaining < 1 {
+ return nil, errors.New("internal error: remaining < 1")
+ }
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ if outP+2 > len(out) {
+ return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+ }
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += int((bitCount + 7) / 8)
+
+ if charnum > s.symbolLen {
+ return nil, errors.New("internal error: charnum > s.symbolLen")
+ }
+ return out[:outP], nil
+}
+
+// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+// note 1 : assume symbolValue is valid (<= maxSymbolValue)
+// note 2 : if freq[symbolValue]==0, return a fake cost of tableLog+1 bits.
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+ minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+ threshold := (minNbBits + 1) << 16
+ if debugAsserts {
+ if !(s.actualTableLog < 16) {
+ panic("!s.actualTableLog < 16")
+ }
+ // ensure enough room for renormalization double shift
+ if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+ panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+ }
+ }
+ tableSize := uint32(1) << s.actualTableLog
+ deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+ // linear interpolation (very approximate)
+ normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+ bitMultiplier := uint32(1) << accuracyLog
+ if debugAsserts {
+ if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+ panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+ }
+ if normalizedDeltaFromThreshold > bitMultiplier {
+ panic("normalizedDeltaFromThreshold > bitMultiplier")
+ }
+ }
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+ if int(s.symbolLen) < len(hist) {
+ // More symbols than we have.
+ return math.MaxUint32
+ }
+ if s.useRLE {
+ // We will never reuse RLE encoders.
+ return math.MaxUint32
+ }
+ const kAccuracyLog = 8
+ badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+ var cost uint32
+ for i, v := range hist {
+ if v == 0 {
+ continue
+ }
+ if s.norm[i] == 0 {
+ return math.MaxUint32
+ }
+ bitCost := s.bitCost(uint8(i), kAccuracyLog)
+ if bitCost > badCost {
+ return math.MaxUint32
+ }
+ cost += v * bitCost
+ }
+ return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+ if s.preDefined {
+ return 0
+ }
+ if s.useRLE {
+ return 8
+ }
+ return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct {
+ bw *bitWriter
+ stateTable []uint16
+ state uint16
+}
+
+// init will initialize the compression state to the first symbol of the stream.
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
+ c.bw = bw
+ c.stateTable = ct.stateTable
+ if len(c.stateTable) == 1 {
+ // RLE
+ c.stateTable[0] = uint16(0)
+ c.state = 0
+ return
+ }
+ nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
+ im := int32((nbBitsOut << 16) - first.deltaNbBits)
+ lu := (im >> nbBitsOut) + int32(first.deltaFindState)
+ c.state = c.stateTable[lu]
+}
+
+// flush will write the tablelog to the output and flush the remaining full bytes.
+func (c *cState) flush(tableLog uint8) {
+ c.bw.flush32()
+ c.bw.addBits16NC(c.state, tableLog)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
new file mode 100644
index 000000000..474cb77d2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go
@@ -0,0 +1,158 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "math"
+ "sync"
+)
+
+var (
+ // fsePredef are the predefined fse tables as defined here:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ // These values are already transformed.
+ fsePredef [3]fseDecoder
+
+ // fsePredefEnc are the predefined encoder based on fse tables as defined here:
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ // These values are already transformed.
+ fsePredefEnc [3]fseEncoder
+
+ // symbolTableX contain the transformations needed for each type as defined in
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+ symbolTableX [3][]baseOffset
+
+ // maxTableSymbol is the biggest supported symbol for each table type
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
+ maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}
+
+ // bitTables is the bits table for each table.
+ bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
+)
+
+type tableIndex uint8
+
+const (
+ // indexes for fsePredef and symbolTableX
+ tableLiteralLengths tableIndex = 0
+ tableOffsets tableIndex = 1
+ tableMatchLengths tableIndex = 2
+
+ maxLiteralLengthSymbol = 35
+ maxOffsetLengthSymbol = 30
+ maxMatchLengthSymbol = 52
+)
+
+// baseOffset is used for calculating transformations.
+type baseOffset struct {
+ baseLine uint32
+ addBits uint8
+}
+
+// fillBase will precalculate base offsets with the given bit distributions.
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
+ if len(bits) != len(dst) {
+ panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
+ }
+ for i, bit := range bits {
+ if base > math.MaxInt32 {
+ panic("invalid decoding table, base overflows int32")
+ }
+
+ dst[i] = baseOffset{
+ baseLine: base,
+ addBits: bit,
+ }
+ base += 1 << bit
+ }
+}
+
+var predef sync.Once
+
+func initPredefined() {
+ predef.Do(func() {
+ // Literals length codes
+ tmp := make([]baseOffset, 36)
+ for i := range tmp[:16] {
+ tmp[i] = baseOffset{
+ baseLine: uint32(i),
+ addBits: 0,
+ }
+ }
+ fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableLiteralLengths] = tmp
+
+ // Match length codes
+ tmp = make([]baseOffset, 53)
+ for i := range tmp[:32] {
+ tmp[i] = baseOffset{
+ // The transformation adds the 3 length.
+ baseLine: uint32(i) + 3,
+ addBits: 0,
+ }
+ }
+ fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+ symbolTableX[tableMatchLengths] = tmp
+
+ // Offset codes
+ tmp = make([]baseOffset, maxOffsetBits+1)
+ tmp[1] = baseOffset{
+ baseLine: 1,
+ addBits: 1,
+ }
+ fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+ symbolTableX[tableOffsets] = tmp
+
+ // Fill predefined tables and transform them.
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+ for i := range fsePredef[:] {
+ f := &fsePredef[i]
+ switch tableIndex(i) {
+ case tableLiteralLengths:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1})
+ f.symbolLen = 36
+ case tableOffsets:
+ // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+ f.actualTableLog = 5
+ copy(f.norm[:], []int16{
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+ f.symbolLen = 29
+ case tableMatchLengths:
+ //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+ f.actualTableLog = 6
+ copy(f.norm[:], []int16{
+ 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1})
+ f.symbolLen = 53
+ }
+ if err := f.buildDtable(); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ if err := f.transform(symbolTableX[i]); err != nil {
+ panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+ }
+ f.preDefined = true
+
+ // Create encoder as well
+ enc := &fsePredefEnc[i]
+ copy(enc.norm[:], f.norm[:])
+ enc.symbolLen = f.symbolLen
+ enc.actualTableLog = f.actualTableLog
+ if err := enc.buildCTable(); err != nil {
+ panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+ }
+ enc.setBits(bitTables[i])
+ enc.preDefined = true
+ }
+ })
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
new file mode 100644
index 000000000..5d73c21eb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -0,0 +1,35 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
+}
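hashLen above is a multiplicative hash: the selected low bytes are multiplied by a large prime and only the top length bits are kept as the table index. A standalone sketch of the 4-byte default case, reusing the prime4bytes constant from the file above:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const prime4bytes = 2654435761

// hash4 keeps the top `length` bits of u32(b) * prime, the same scheme
// hashLen uses in its 4-byte default case.
func hash4(b []byte, length uint8) uint32 {
	u := binary.LittleEndian.Uint32(b)
	return (u * prime4bytes) >> (32 - length)
}

func main() {
	// Near-identical inputs typically land in different buckets
	// of a 65536-entry match-finder table.
	fmt.Println(hash4([]byte("abcd"), 16))
	fmt.Println(hash4([]byte("abce"), 16))
}
```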
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
new file mode 100644
index 000000000..09164856d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -0,0 +1,116 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "github.com/klauspost/compress/huff0"
+)
+
+// history contains the information transferred between blocks.
+type history struct {
+ // Literal decompression
+ huffTree *huff0.Scratch
+
+ // Sequence decompression
+ decoders sequenceDecs
+ recentOffsets [3]int
+
+ // History buffer...
+ b []byte
+
+ // ignoreBuffer is meant to ignore a number of bytes
+ // when checking for matches in history
+ ignoreBuffer int
+
+ windowSize int
+ allocFrameBuffer int // needed?
+ error bool
+ dict *dict
+}
+
+// reset will reset the history to initial state of a frame.
+// The history must already have been initialized to the desired size.
+func (h *history) reset() {
+ h.b = h.b[:0]
+ h.ignoreBuffer = 0
+ h.error = false
+ h.recentOffsets = [3]int{1, 4, 8}
+ h.decoders.freeDecoders()
+ h.decoders = sequenceDecs{br: h.decoders.br}
+ h.freeHuffDecoder()
+ h.huffTree = nil
+ h.dict = nil
+ //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
+ if h.huffTree != nil {
+ if h.dict == nil || h.dict.litEnc != h.huffTree {
+ huffDecoderPool.Put(h.huffTree)
+ h.huffTree = nil
+ }
+ }
+}
+
+func (h *history) setDict(dict *dict) {
+ if dict == nil {
+ return
+ }
+ h.dict = dict
+ h.decoders.litLengths = dict.llDec
+ h.decoders.offsets = dict.ofDec
+ h.decoders.matchLengths = dict.mlDec
+ h.decoders.dict = dict.content
+ h.recentOffsets = dict.offsets
+ h.huffTree = dict.litEnc
+}
+
+// append bytes to history.
+// This function will make sure there is space for it,
+// if the buffer has been allocated with enough extra space.
+func (h *history) append(b []byte) {
+ if len(b) >= h.windowSize {
+ // Discard all history by simply overwriting
+ h.b = h.b[:h.windowSize]
+ copy(h.b, b[len(b)-h.windowSize:])
+ return
+ }
+
+ // If there is space, append it.
+ if len(b) < cap(h.b)-len(h.b) {
+ h.b = append(h.b, b...)
+ return
+ }
+
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(b) + len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+ copy(h.b[h.windowSize-len(b):], b)
+}
+
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+ if cap(h.b) < h.allocFrameBuffer {
+ h.b = make([]byte, 0, h.allocFrameBuffer)
+ return
+ }
+
+ avail := cap(h.b) - len(h.b)
+ if avail >= h.windowSize || avail > maxCompressedBlockSize {
+ return
+ }
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+}
+
+// append bytes to history without ever discarding anything.
+func (h *history) appendKeep(b []byte) {
+ h.b = append(h.b, b...)
+}
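history.append above maintains a sliding window of at most windowSize bytes: a block at least as large as the window replaces the history outright, otherwise older bytes are shifted down so only the most recent windowSize bytes survive. A simplified sketch of the same policy (hypothetical helper, not the package's type):

```go
package main

import "fmt"

// appendWindow keeps at most windowSize of the most recent bytes,
// mirroring the policy of history.append in simplified form.
func appendWindow(hist, b []byte, windowSize int) []byte {
	if len(b) >= windowSize {
		// The new block alone fills the window; drop all prior history.
		return append(hist[:0], b[len(b)-windowSize:]...)
	}
	hist = append(hist, b...)
	if len(hist) > windowSize {
		// Shift down so only the newest windowSize bytes remain.
		copy(hist, hist[len(hist)-windowSize:])
		hist = hist[:windowSize]
	}
	return hist
}

func main() {
	var hist []byte
	hist = appendWindow(hist, []byte("abcdef"), 8)
	hist = appendWindow(hist, []byte("ghij"), 8)
	fmt.Println(string(hist)) // "cdefghij": the most recent 8 bytes
}
```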
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
new file mode 100644
index 000000000..24b53065f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
new file mode 100644
index 000000000..777290d44
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -0,0 +1,71 @@
+# xxhash
+
+VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
+
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
+
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
+
+```
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
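The API summarized in the vendored README amounts to one-shot Sum64 calls plus a streaming Digest. A minimal usage sketch, written against the public upstream module github.com/cespare/xxhash/v2 (the same API that is vendored here):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.Write(data[:5])
	d.Write(data[5:])
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call
}
```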
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
new file mode 100644
index 000000000..fc40c8200
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -0,0 +1,230 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package.
+
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = primes[0] + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -primes[0]
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(memleft, b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ c := copy(memleft, b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[c:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
new file mode 100644
index 000000000..ddb63aa91
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -0,0 +1,210 @@
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
new file mode 100644
index 000000000..ae7d4d329
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -0,0 +1,184 @@
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(s *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD s+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
new file mode 100644
index 000000000..d4221edf4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -0,0 +1,16 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(s *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
new file mode 100644
index 000000000..0be16cefc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -0,0 +1,76 @@
+//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
+// +build !amd64,!arm64 appengine !gc purego noasm
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := primes[0] + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -primes[0]
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
new file mode 100644
index 000000000..6f3b0cb10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go
@@ -0,0 +1,11 @@
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
new file mode 100644
index 000000000..f41932b7a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
new file mode 100644
index 000000000..0782b86e3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+ MOVQ a_base+0(FP), AX
+ MOVQ b_base+24(FP), CX
+ MOVQ a_len+8(FP), DX
+
+ // matchLen
+ XORL SI, SI
+ CMPL DX, $0x08
+ JB matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+#else
+ BSFQ BX, BX
+#endif
+ SHRL $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
+
+matchlen_loop_standalone:
+ LEAL -8(DX), DX
+ LEAL 8(SI), SI
+ CMPL DX, $0x08
+ JAE matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JB matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ LEAL -4(DX), DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JB matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ LEAL -2(DX), DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JB gen_match_len_end
+ MOVB (AX)(SI*1), BL
+ CMPB (CX)(SI*1), BL
+ JNE gen_match_len_end
+ INCL SI
+
+gen_match_len_end:
+ MOVQ SI, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
new file mode 100644
index 000000000..57b9c31c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -0,0 +1,33 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+ for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+ diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ if diff != 0 {
+ return n + bits.TrailingZeros64(diff)>>3
+ }
+ n += 8
+ }
+
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+
+}
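matchLen is the primitive behind match extension: it returns the length of the common prefix of a and b by XORing eight bytes at a time and using the trailing-zero count to locate the first differing byte. A standalone mirror of the generic version, for illustration:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen mirrors matchLen above: XOR eight bytes at a time and use
// the trailing-zero count of the difference to find the first mismatching byte.
func commonPrefixLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("compression"), []byte("compressed"))) // 8
}
```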
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
new file mode 100644
index 000000000..d7fe6d82d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -0,0 +1,503 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+type seq struct {
+ litLen uint32
+ matchLen uint32
+ offset uint32
+
+ // Codes are stored here for the encoder
+ // so they only have to be looked up once.
+ llCode, mlCode, ofCode uint8
+}
+
+type seqVals struct {
+ ll, ml, mo int
+}
+
+func (s seq) String() string {
+ if s.offset <= 3 {
+ if s.offset == 0 {
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
+ }
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
+ }
+ return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
+}
+
+type seqCompMode uint8
+
+const (
+ compModePredefined seqCompMode = iota
+ compModeRLE
+ compModeFSE
+ compModeRepeat
+)
+
+type sequenceDec struct {
+ // decoder keeps track of the current state and updates it from the bitstream.
+ fse *fseDecoder
+ state fseState
+ repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+ if s.fse == nil {
+ return errors.New("sequence decoder not defined")
+ }
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// execute will execute the decoded sequences with the provided history.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Copy from dictionary...
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+ }
+ end := dictO + seq.ml
+ if end > len(s.dict) {
+ n := len(s.dict) - dictO
+ copy(out[t:], s.dict[dictO:])
+ t += n
+ seq.ml -= n
+ } else {
+ copy(out[t:], s.dict[dictO:end])
+ t += end - dictO
+ continue
+ }
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+ // We must be in current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ continue
+ } else {
+ // Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
+ }
+
+ br := s.br
+ seqs := s.nSeqs
+ startSize := len(s.out)
+ // Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ if debugDecoder {
+ println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+ }
+ for i := seqs - 1; i >= 0; i-- {
+ if br.overread() {
+ printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
+ return io.ErrUnexpectedEOF
+ }
+ var ll, mo, ml int
+ if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+
+ if ll > len(s.literals) {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+ }
+ size := ll + ml + len(out)
+ if size-startSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ if size > cap(out) {
+ // Not enough size, which can happen under high volume block streaming conditions
+ // but could be if destination slice is too small for sync operations.
+ // over-allocating here can create a large amount of GC pressure so we try to keep
+ // it as contained as possible
+ used := len(out) - startSize
+ addBytes := 256 + ll + ml + used>>2
+ // Clamp to max block size.
+ if used+addBytes > maxBlockSize {
+ addBytes = maxBlockSize - used
+ }
+ out = append(out, make([]byte, addBytes)...)
+ out = out[:len(out)-addBytes]
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+
+ // Add literals
+ out = append(out, s.literals[:ll]...)
+ s.literals = s.literals[ll:]
+
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+
+ if mo > len(out)+len(hist) || mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (mo - (len(out) + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
+ }
+ end := dictO + ml
+ if end > len(s.dict) {
+ out = append(out, s.dict[dictO:]...)
+ ml -= len(s.dict) - dictO
+ } else {
+ out = append(out, s.dict[dictO:end]...)
+ mo = 0
+ ml = 0
+ }
+ }
+
+ // Copy from history.
+ // TODO: Blocks without history could be made to ignore this completely.
+ if v := mo - len(out); v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if ml > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ out = append(out, hist[start:]...)
+ ml -= v
+ } else {
+ out = append(out, hist[start:start+ml]...)
+ ml = 0
+ }
+ }
+ // We must be in current buffer now
+ if ml > 0 {
+ start := len(out) - mo
+ if ml <= len(out)-start {
+ // No overlap
+ out = append(out, out[start:start+ml]...)
+ } else {
+ // Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+ out = out[:len(out)+ml]
+ src := out[start : start+ml]
+ // Destination is the space we just added.
+ dst := out[len(out)-ml:]
+ dst = dst[:len(src)]
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ if i == 0 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+
+ if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+
+ // Add final literals
+ s.out = append(out, s.literals...)
+ return br.close()
+}
+
+var bitMask [16]uint16
+
+func init() {
+ for i := range bitMask[:] {
+ bitMask[i] = uint16((1 << uint(i)) - 1)
+ }
+}
+
+func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
+ // Final will not read from stream.
+ ll, llB := llState.final()
+ ml, mlB := mlState.final()
+ mo, moB := ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fill()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fill()
+ }
+ // matchlength+literal length, max 32 bits
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+ mo = s.adjustOffset(mo, ll, moB)
+ return
+}
+
+func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
+ if offsetB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = offset
+ return offset
+ }
+
+ if litLen == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ offset++
+ }
+
+ if offset == 0 {
+ return s.prevOffset[0]
+ }
+ var temp int
+ if offset == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[offset]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("temp was 0")
+ temp = 1
+ }
+
+ if offset != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ return temp
+}
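adjustOffset above implements the zstd repeat-offset rules: when an offset code carries at most one extra bit, the decoded value (0, 1 or 2) selects one of the three most recently used offsets, shifted by one when the sequence has no literals so that the shifted value 3 means the most recent offset minus one. A simplified standalone sketch of that bookkeeping (hypothetical helper, not the decoder's type):

```go
package main

import "fmt"

// adjustRepeat resolves a repeat-offset value against the three most recent
// offsets, following the same rules as adjustOffset above. With at most one
// extra bit the decoded value is 0, 1 or 2 (or 1..3 after the no-literals shift).
func adjustRepeat(prev *[3]int, offset, litLen int) int {
	if litLen == 0 {
		// With no literals, repeat codes are shifted by one;
		// the shifted value 3 means "most recent offset minus one".
		offset++
	}
	if offset == 0 {
		return prev[0] // most recent offset; recent-offset order unchanged
	}
	var temp int
	if offset == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[offset] // offset is 1 or 2 here
	}
	if temp == 0 {
		temp = 1 // guard against corrupt input, as the decoder does
	}
	if offset != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{1, 4, 8} // initial recent offsets of a zstd frame
	fmt.Println(adjustRepeat(&prev, 1, 5)) // 4: the second most recent offset
	fmt.Println(prev)                      // [4 1 8]
}
```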
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 000000000..8adabd828
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,394 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ litRemain int
+ out []byte
+ outPosition int
+ literals []byte
+ litPosition int
+ history []byte
+ windowSize int
+ ll int // set on error (not for all errors, please refer to _generate/gen.go)
+ ml int // set on error (not for all errors, please refer to _generate/gen.go)
+ mo int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ if len(s.dict) > 0 {
+ return false, nil
+ }
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+ return false, nil
+ }
+
+ // FIXME: Using unsafe memory copies leads to rare, random crashes
+ // with fuzz testing. It is therefore disabled for now.
+ const useSafe = true
+ /*
+ useSafe := false
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+ useSafe = true
+ }
+ if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+ useSafe = true
+ }
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ useSafe = true
+ }
+ */
+
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeSyncAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ iteration: s.nSeqs - 1,
+ litRemain: len(s.literals),
+ out: s.out,
+ outPosition: len(s.out),
+ literals: s.literals,
+ windowSize: s.windowSize,
+ history: hist,
+ }
+
+ s.seqSize = 0
+ startSize := len(s.out)
+
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+ }
+ } else {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+ }
+ }
+ switch errCode {
+ case noError:
+ break
+
+ case errorMatchLenOfsMismatch:
+ return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+ case errorMatchLenTooBig:
+ return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+ case errorMatchOffTooBig:
+ return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+ case errorNotEnoughLiterals:
+ return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+ ctx.ll, ctx.litRemain+ctx.ll)
+
+ case errorOverread:
+ return true, io.ErrUnexpectedEOF
+
+ case errorNotEnoughSpace:
+ size := ctx.outPosition + ctx.ll + ctx.ml
+ if debugDecoder {
+ println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+ }
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ return true, err
+ }
+
+ s.literals = s.literals[ctx.litPosition:]
+ t := ctx.outPosition
+ s.out = s.out[:t]
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(s.out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+ }
+ }
+
+ return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ seqs []seqVals
+ litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ seqs: seqs,
+ iteration: len(seqs) - 1,
+ litRemain: len(s.literals),
+ }
+
+ if debugDecoder {
+ println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+ }
+
+ s.seqSize = 0
+ lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+ }
+ } else {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+ }
+ }
+ if errCode != 0 {
+ i := len(seqs) - ctx.iteration - 1
+ switch errCode {
+ case errorMatchLenOfsMismatch:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+ case errorMatchLenTooBig:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+ case errorNotEnoughLiterals:
+ ll := ctx.seqs[i].ll
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ case errorOverread:
+ return io.ErrUnexpectedEOF
+ }
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+ }
+
+ if ctx.litRemain < 0 {
+ return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+ len(s.literals), len(s.literals)-ctx.litRemain)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ if debugDecoder {
+ println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+ seqs []seqVals
+ seqIndex int
+ out []byte
+ history []byte
+ literals []byte
+ outPosition int
+ litPosition int
+ windowSize int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+ addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ ctx := executeAsmContext{
+ seqs: seqs,
+ seqIndex: 0,
+ out: out,
+ history: hist,
+ outPosition: t,
+ litPosition: 0,
+ literals: s.literals,
+ windowSize: s.windowSize,
+ }
+ var ok bool
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+ } else {
+ ok = sequenceDecs_executeSimple_amd64(&ctx)
+ }
+ if !ok {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+ }
+ s.literals = s.literals[ctx.litPosition:]
+ t = ctx.outPosition
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
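decodeSyncSimple and decode above select between plain amd64 and BMI2 assembly bodies at runtime through the library's internal cpuinfo helper. A sketch of the same dispatch pattern using the public golang.org/x/sys/cpu package (an assumed stand-in here; the decode helpers are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// decodeBMI2 stands in for a BMI2-accelerated implementation (hypothetical).
func decodeBMI2(data []byte) int { return len(data) }

// decodeGeneric stands in for the portable fallback (hypothetical).
func decodeGeneric(data []byte) int { return len(data) }

// decode picks the fastest available implementation at runtime,
// the same shape of dispatch used by decodeSyncSimple above.
func decode(data []byte) int {
	if cpu.X86.HasBMI2 {
		return decodeBMI2(data)
	}
	return decodeGeneric(data)
}

func main() {
	fmt.Println(decode([]byte("payload")), "bytes; BMI2:", cpu.X86.HasBMI2)
}
```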
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 000000000..5b06174b8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,4151 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 32(CX), BX
+ MOVQ (CX), AX
+ MOVQ 8(CX), SI
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_end
+
+sequenceDecs_decode_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_2_end
+
+sequenceDecs_decode_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRL $0x10, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRL $0x10, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRL $0x10, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_amd64_adjust_zero
+ JEQ sequenceDecs_decode_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_amd64_adjust_three
+ JMP sequenceDecs_decode_amd64_adjust_two
+
+sequenceDecs_decode_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 24(AX)
+ MOVB BL, 32(AX)
+ MOVQ SI, 8(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 32(CX), BX
+ MOVQ (CX), AX
+ MOVQ 8(CX), SI
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_56_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_56_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_56_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRL $0x10, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRL $0x10, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRL $0x10, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_amd64_adjust_zero
+ JEQ sequenceDecs_decode_56_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_amd64_adjust_three
+ JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_56_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 24(AX)
+ MOVB BL, 32(AX)
+ MOVQ SI, 8(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+	// Fill bitreader to have enough for the remaining values
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_bmi2_adjust_three
+ JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 24(CX)
+ MOVB DL, 32(CX)
+ MOVQ BX, 8(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_56_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_56_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_56_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_56_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_bmi2_adjust_three
+ JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_56_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 24(CX)
+ MOVB DL, 32(CX)
+ MOVQ BX, 8(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ BX, R12
+ ADDQ R13, BX
+
+copy_2:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ MOVQ R11, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (SI), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, SI
+ ADDQ $0x10, BX
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(SI)(R14*1), SI
+ LEAQ 16(BX)(R14*1), BX
+ MOVUPS -16(SI), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ R11, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ R11, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (SI), R14
+ MOVB -1(SI)(R11*1), R15
+ MOVB R14, (BX)
+ MOVB R15, -1(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (SI), R14
+ MOVB 2(SI), R15
+ MOVW R14, (BX)
+ MOVB R15, 2(BX)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (SI), R14
+ MOVL -4(SI)(R11*1), R15
+ MOVL R14, (BX)
+ MOVL R15, -4(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (SI), R14
+ MOVQ -8(SI)(R11*1), R15
+ MOVQ R14, (BX)
+ MOVQ R15, -8(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+
+copy_1_end:
+ ADDQ R11, DI
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R11
+ ADDQ $0x10, BX
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(R11)(R12*1), R11
+ LEAQ 16(BX)(R12*1), BX
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 32(CX), BX
+ MOVQ (CX), AX
+ MOVQ 8(CX), SI
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining values
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRL $0x10, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRL $0x10, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRL $0x10, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R10, CX
+ ADDQ R13, R10
+
+copy_2:
+ MOVUPS (AX), X0
+ MOVUPS X0, (CX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, CX
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 24(AX)
+ MOVB BL, 32(AX)
+ MOVQ SI, 8(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining values
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R10)(R14*1), X0
+ MOVUPS X0, (R9)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R9, R12
+ ADDQ R13, R9
+
+copy_2:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 24(CX)
+ MOVB DL, 32(CX)
+ MOVQ BX, 8(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 32(CX), BX
+ MOVQ (CX), AX
+ MOVQ 8(CX), SI
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining values
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRL $0x10, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRL $0x10, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRL $0x10, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ MOVQ AX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R10
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R11)(R14*1), R11
+ LEAQ 16(R10)(R14*1), R10
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ AX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ AX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R11), R14
+ MOVB -1(R11)(AX*1), R15
+ MOVB R14, (R10)
+ MOVB R15, -1(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R11), R14
+ MOVB 2(R11), R15
+ MOVW R14, (R10)
+ MOVB R15, 2(R10)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R11), R14
+ MOVL -4(R11)(AX*1), R15
+ MOVL R14, (R10)
+ MOVL R15, -4(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R11), R14
+ MOVQ -8(R11)(AX*1), R15
+ MOVQ R14, (R10)
+ MOVQ R15, -8(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+
+copy_1_end:
+ ADDQ AX, R12
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (AX), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, AX
+ ADDQ $0x10, R10
+ SUBQ $0x10, CX
+ JAE copy_2_loop
+ LEAQ 16(AX)(CX*1), AX
+ LEAQ 16(R10)(CX*1), R10
+ MOVUPS -16(AX), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (AX), CL
+ MOVB -1(AX)(R13*1), R14
+ MOVB CL, (R10)
+ MOVB R14, -1(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (AX), CX
+ MOVB 2(AX), R14
+ MOVW CX, (R10)
+ MOVB R14, 2(R10)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (AX), CX
+ MOVL -4(AX)(R13*1), R14
+ MOVL CX, (R10)
+ MOVL R14, -4(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (AX), CX
+ MOVQ -8(AX)(R13*1), R14
+ MOVQ CX, (R10)
+ MOVQ R14, -8(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 24(AX)
+ MOVB BL, 32(AX)
+ MOVQ SI, 8(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 32(BX), DX
+ MOVQ (BX), CX
+ MOVQ 8(BX), BX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ MOVQ CX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R10), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R10
+ ADDQ $0x10, R9
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R10)(R14*1), R10
+ LEAQ 16(R9)(R14*1), R9
+ MOVUPS -16(R10), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ CX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ CX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R10), R14
+ MOVB -1(R10)(CX*1), R15
+ MOVB R14, (R9)
+ MOVB R15, -1(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R10), R14
+ MOVB 2(R10), R15
+ MOVW R14, (R9)
+ MOVB R15, 2(R9)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R10), R14
+ MOVL -4(R10)(CX*1), R15
+ MOVL R14, (R9)
+ MOVL R15, -4(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R10), R14
+ MOVQ -8(R10)(CX*1), R15
+ MOVQ R14, (R9)
+ MOVQ R15, -8(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+
+copy_1_end:
+ ADDQ CX, R11
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R9
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(CX)(R12*1), CX
+ LEAQ 16(R9)(R12*1), R9
+ MOVUPS -16(CX), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (CX), R12
+ MOVB -1(CX)(R13*1), R14
+ MOVB R12, (R9)
+ MOVB R14, -1(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (CX), R12
+ MOVB 2(CX), R14
+ MOVW R12, (R9)
+ MOVB R14, 2(R9)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (CX), R12
+ MOVL -4(CX)(R13*1), R14
+ MOVL R12, (R9)
+ MOVL R14, -4(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (CX), R12
+ MOVQ -8(CX)(R13*1), R14
+ MOVQ R12, (R9)
+ MOVQ R14, -8(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_safe_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 24(CX)
+ MOVB DL, 32(CX)
+ MOVQ BX, 8(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
new file mode 100644
index 000000000..2fb35b788
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -0,0 +1,237 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+// decodeSyncSimple is a stub for platforms without the assembly implementation; it reports false so decoding falls back to the generic path.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ return false, nil
+}
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ // Grab full-size tables to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Malformed input
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history, counted from the end of hist.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into the current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+
+ // We must be in the current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at a time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
new file mode 100644
index 000000000..8014174a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -0,0 +1,114 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "math/bits"
+
+type seqCoders struct {
+ llEnc, ofEnc, mlEnc *fseEncoder
+ llPrev, ofPrev, mlPrev *fseEncoder
+}
+
+// swap coders with another (block).
+func (s *seqCoders) swap(other *seqCoders) {
+ *s, *other = *other, *s
+}
+
+// setPrev will update the previous encoders to the actually used ones
+// and make sure a fresh one is in the main slot.
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
+ compareSwap := func(used *fseEncoder, current, prev **fseEncoder) {
+ // We used the new one; move current to history and reuse the previous history.
+ if *current == used {
+ *prev, *current = *current, *prev
+ c := *current
+ p := *prev
+ c.reUsed = false
+ p.reUsed = true
+ return
+ }
+ if used == *prev {
+ return
+ }
+ // Ensure we cannot reuse by accident
+ prevEnc := *prev
+ prevEnc.symbolLen = 0
+ }
+ compareSwap(ll, &s.llEnc, &s.llPrev)
+ compareSwap(ml, &s.mlEnc, &s.mlPrev)
+ compareSwap(of, &s.ofEnc, &s.ofPrev)
+}
+
+func highBit(val uint32) (n uint32) {
+ return uint32(bits.Len32(val) - 1)
+}
+
+var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24}
+
+// Up to 6 bits
+const maxLLCode = 35
+
+// llBitsTable translates from ll code to number of bits.
+var llBitsTable = [maxLLCode + 1]byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16}
+
+// llCode returns the code that represents the literal length requested.
+func llCode(litLength uint32) uint8 {
+ const llDeltaCode = 19
+ if litLength <= 63 {
+ // Compiler insists on bounds check (Go 1.12)
+ return llCodeTable[litLength&63]
+ }
+ return uint8(highBit(litLength)) + llDeltaCode
+}
+
+var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}
+
+// Up to 6 bits
+const maxMLCode = 52
+
+// mlBitsTable translates from ml code to number of bits.
+var mlBitsTable = [maxMLCode + 1]byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16}
+
+// Note: mlBase = matchLength - MINMATCH, because that is the format in which
+// it is stored in seqStore->sequences.
+func mlCode(mlBase uint32) uint8 {
+ const mlDeltaCode = 36
+ if mlBase <= 127 {
+ // Compiler insists on bounds check (Go 1.12)
+ return mlCodeTable[mlBase&127]
+ }
+ return uint8(highBit(mlBase)) + mlDeltaCode
+}
+
+func ofCode(offset uint32) uint8 {
+ // A valid offset will always be > 0.
+ return uint8(bits.Len32(offset) - 1)
+}
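+
+// Worked example (illustrative): llCode(100) = highBit(100) + 19 = 6 + 19 = 25,
+// and llBitsTable[25] = 6, so literal lengths 64-127 all map to code 25 with
+// 6 extra bits. Likewise mlCode(200) = highBit(200) + 36 = 7 + 36 = 43 with
+// mlBitsTable[43] = 7 extra bits, and ofCode(1<<20) = 20.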
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
new file mode 100644
index 000000000..ec13594e8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -0,0 +1,434 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "io"
+
+ "github.com/klauspost/compress/huff0"
+ snappy "github.com/klauspost/compress/internal/snapref"
+)
+
+const (
+ snappyTagLiteral = 0x00
+ snappyTagCopy1 = 0x01
+ snappyTagCopy2 = 0x02
+ snappyTagCopy4 = 0x03
+)
+
+const (
+ snappyChecksumSize = 4
+ snappyMagicBody = "sNaPpY"
+
+ // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ snappyMaxBlockSize = 65536
+
+ // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ snappyMaxEncodedLenOfMaxBlockSize = 76490
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var (
+ // ErrSnappyCorrupt reports that the input is invalid.
+ ErrSnappyCorrupt = errors.New("snappy: corrupt input")
+ // ErrSnappyTooLarge reports that the uncompressed length is too large.
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrSnappyUnsupported reports that the input isn't supported.
+ ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much less than what can be done by a full decompression
+// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
+// any errors being generated.
+// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+ r io.Reader
+ err error
+ buf []byte
+ block *blockEnc
+}
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+ initPredefined()
+ r.err = nil
+ r.r = in
+ if r.block == nil {
+ r.block = &blockEnc{}
+ r.block.init()
+ }
+ r.block.initNewEncode()
+ if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+ r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+ }
+ r.block.litEnc.Reuse = huff0.ReusePolicyNone
+ var written int64
+ var readHeader bool
+ {
+ header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+ var n int
+ n, r.err = w.Write(header)
+ if r.err != nil {
+ return written, r.err
+ }
+ written += int64(n)
+ }
+
+ for {
+ if !r.readFull(r.buf[:4], true) {
+ // Add empty last block
+ r.block.reset(nil)
+ r.block.last = true
+ err := r.block.encodeLits(r.block.literals, false)
+ if err != nil {
+ return written, err
+ }
+ n, err := w.Write(r.block.output)
+ if err != nil {
+ return written, err
+ }
+ written += int64(n)
+
+ return written, r.err
+ }
+ chunkType := r.buf[0]
+ if !readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ println("chunkType != chunkTypeStreamIdentifier", chunkType)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ println("chunkLen > len(r.buf)", chunkType)
+ r.err = ErrSnappyUnsupported
+ return written, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < snappyChecksumSize {
+ println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return written, r.err
+ }
+ //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[snappyChecksumSize:]
+
+ n, hdr, err := snappyDecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return written, r.err
+ }
+ buf = buf[hdr:]
+ if n > snappyMaxBlockSize {
+ println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.reset(nil)
+ r.block.pushOffsets()
+ if err := decodeSnappy(r.block, buf); err != nil {
+ r.err = err
+ return written, r.err
+ }
+ if r.block.size+r.block.extraLits != n {
+ printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ err = r.block.encode(nil, false, false)
+ switch err {
+ case errIncompressible:
+ r.block.popOffsets()
+ r.block.reset(nil)
+ r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
+ if err != nil {
+ return written, err
+ }
+ err = r.block.encodeLits(r.block.literals, false)
+ if err != nil {
+ return written, err
+ }
+ case nil:
+ default:
+ return written, err
+ }
+
+ n, r.err = w.Write(r.block.output)
+ if r.err != nil {
+ return written, err
+ }
+ written += int64(n)
+ continue
+ case chunkTypeUncompressedData:
+ if debugEncoder {
+ println("Uncompressed, chunklen", chunkLen)
+ }
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < snappyChecksumSize {
+ println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.reset(nil)
+ buf := r.buf[:snappyChecksumSize]
+ if !r.readFull(buf, false) {
+ return written, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.block.literals instead of via r.buf.
+ n := chunkLen - snappyChecksumSize
+ if n > snappyMaxBlockSize {
+ println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ r.block.literals = r.block.literals[:n]
+ if !r.readFull(r.block.literals, false) {
+ return written, r.err
+ }
+ if snappyCRC(r.block.literals) != checksum {
+ println("literals crc mismatch")
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ err := r.block.encodeLits(r.block.literals, false)
+ if err != nil {
+ return written, err
+ }
+ n, r.err = w.Write(r.block.output)
+ if r.err != nil {
+ return written, err
+ }
+ written += int64(n)
+ continue
+
+ case chunkTypeStreamIdentifier:
+ if debugEncoder {
+ println("stream id", chunkLen, len(snappyMagicBody))
+ }
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(snappyMagicBody) {
+ println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
+ return written, r.err
+ }
+ for i := 0; i < len(snappyMagicBody); i++ {
+ if r.buf[i] != snappyMagicBody[i] {
+ println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
+ r.err = ErrSnappyCorrupt
+ return written, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ println("chunkType <= 0x7f")
+ r.err = ErrSnappyUnsupported
+ return written, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return written, r.err
+ }
+ }
+}
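+
+// Illustrative usage sketch, based only on the Convert signature above;
+// snappyIn (io.Reader) and zstdOut (io.Writer) are placeholder values:
+//
+//	var conv zstd.SnappyConverter
+//	written, err := conv.Convert(snappyIn, zstdOut)
+//	if err != nil {
+//		// written holds the number of zstd bytes emitted before the error
+//	}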
+
+// decodeSnappy appends the literals and sequences decoded from src to blk. It assumes
+// that the varint-encoded length of the decompressed bytes has already been read.
+func decodeSnappy(blk *blockEnc, src []byte) error {
+ //decodeRef(make([]byte, snappyMaxBlockSize), src)
+ var s, length int
+ lits := blk.extraLits
+ var offset uint32
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case snappyTagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, src)
+ return ErrSnappyCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ if x > snappyMaxBlockSize {
+ println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
+ return ErrSnappyCorrupt
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ println("length <= 0 ", length)
+
+ return errUnsupportedLiteralLength
+ }
+ //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s {
+ // return ErrSnappyCorrupt
+ //}
+
+ blk.literals = append(blk.literals, src[s:s+length]...)
+ //println(length, "litLen")
+ lits += length
+ s += length
+ continue
+
+ case snappyTagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
+
+ case snappyTagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = uint32(src[s-2]) | uint32(src[s-1])<<8
+
+ case snappyTagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ println("uint(s) > uint(len(src)", s, len(src))
+ return ErrSnappyCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+
+ if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
+ println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
+
+ return ErrSnappyCorrupt
+ }
+
+ // Check if offset is one of the recent offsets.
+ // Adjusts the output offset accordingly.
+ // Gives a tiny bit of compression, typically around 1%.
+ if false {
+ offset = blk.matchOffset(offset, uint32(lits))
+ } else {
+ offset += 3
+ }
+
+ blk.sequences = append(blk.sequences, seq{
+ litLen: uint32(lits),
+ offset: offset,
+ matchLen: uint32(length) - zstdMinMatch,
+ })
+ blk.size += length + lits
+ lits = 0
+ }
+ blk.extraLits = lits
+ return nil
+}
+
+func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrSnappyCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// snappyCRC implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func snappyCRC(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return c>>15 | c<<17 + 0xa282ead8
+}
+
+// snappyDecodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrSnappyCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrSnappyTooLarge
+ }
+ return int(v), n, nil
+}
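+
+// Worked example (illustrative): a chunk body starting with the bytes
+// 0xFE 0xFF 0x03 carries the uvarint 65534, so snappyDecodedLen returns
+// blockLen=65534 and headerLen=3.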
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
new file mode 100644
index 000000000..29c15c8c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -0,0 +1,141 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip.
+// See https://www.winzip.com/win/en/comp_info.html
+const ZipMethodWinZip = 93
+
+// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression.
+// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression.
+// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
+const ZipMethodPKWare = 20
+
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+ if err != nil {
+ panic(err)
+ }
+ return z
+}}
+
+// newZipReader creates a pooled zip decompressor.
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ pool := &zipReaderPool
+ if len(opts) > 0 {
+ opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+ // Force concurrency 1
+ opts = append(opts, WithDecoderConcurrency(1))
+ // Create our own pool
+ pool = &sync.Pool{}
+ }
+ return func(r io.Reader) io.ReadCloser {
+ dec, ok := pool.Get().(*Decoder)
+ if ok {
+ dec.Reset(r)
+ } else {
+ d, err := NewReader(r, opts...)
+ if err != nil {
+ panic(err)
+ }
+ dec = d
+ }
+ return &pooledZipReader{dec: dec, pool: pool}
+ }
+}
+
+type pooledZipReader struct {
+ mu sync.Mutex // guards Close and Read
+ pool *sync.Pool
+ dec *Decoder
+}
+
+func (r *pooledZipReader) Read(p []byte) (n int, err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.dec == nil {
+ return 0, errors.New("read after close or EOF")
+ }
+ dec, err := r.dec.Read(p)
+ if err == io.EOF {
+ r.dec.Reset(nil)
+ r.pool.Put(r.dec)
+ r.dec = nil
+ }
+ return dec, err
+}
+
+func (r *pooledZipReader) Close() error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ var err error
+ if r.dec != nil {
+ err = r.dec.Reset(nil)
+ r.pool.Put(r.dec)
+ r.dec = nil
+ }
+ return err
+}
+
+type pooledZipWriter struct {
+ mu sync.Mutex // guards Close and Write
+ enc *Encoder
+ pool *sync.Pool
+}
+
+func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.enc == nil {
+ return 0, errors.New("Write after Close")
+ }
+ return w.enc.Write(p)
+}
+
+func (w *pooledZipWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ var err error
+ if w.enc != nil {
+ err = w.enc.Close()
+ w.pool.Put(w.enc)
+ w.enc = nil
+ }
+ return err
+}
+
+// ZipCompressor returns a compressor that can be registered with zip libraries.
+// The provided encoder options will be used on all encodes.
+func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
+ var pool sync.Pool
+ return func(w io.Writer) (io.WriteCloser, error) {
+ enc, ok := pool.Get().(*Encoder)
+ if ok {
+ enc.Reset(w)
+ } else {
+ var err error
+ enc, err = NewWriter(w, opts...)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &pooledZipWriter{enc: enc, pool: &pool}, nil
+ }
+}
+
+// ZipDecompressor returns a decompressor that can be registered with zip libraries.
+// See ZipCompressor for example.
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ return newZipReader(opts...)
+}
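+
+// Illustrative registration sketch using the standard library archive/zip;
+// out, in and size are placeholder values:
+//
+//	zw := zip.NewWriter(out)
+//	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+//
+//	zr, err := zip.NewReader(in, size)
+//	if err == nil {
+//		zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
+//	}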
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
new file mode 100644
index 000000000..4be7cc736
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -0,0 +1,121 @@
+// Package zstd provides decompression of zstandard files.
+//
+// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
+package zstd
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "log"
+ "math"
+)
+
+// enable debug printing
+const debug = false
+
+// enable encoding debug printing
+const debugEncoder = debug
+
+// enable decoding debug printing
+const debugDecoder = debug
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
+const debugSequences = false
+
+// print detailed matching information
+const debugMatches = false
+
+// force encoder to use predefined tables.
+const forcePreDef = false
+
+// zstdMinMatch is the minimum zstd match length.
+const zstdMinMatch = 3
+
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
+var (
+ // ErrReservedBlockType is returned when a reserved block type is found.
+ // Typically this indicates wrong or corrupted input.
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+ // ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+ // Typically this indicates wrong or corrupted input.
+ ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+ // ErrBlockTooSmall is returned when a block is too small to be decoded.
+ // Typically returned on invalid input.
+ ErrBlockTooSmall = errors.New("block too small")
+
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
+ // ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+ // Typically this indicates wrong or corrupted input.
+ ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+ // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+ // Typically this indicates wrong or corrupted input.
+ ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+ // ErrWindowSizeTooSmall is returned when no window size is specified.
+ // Typically this indicates wrong or corrupted input.
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+ // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+ ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+ // ErrUnknownDictionary is returned if the dictionary ID is unknown.
+ ErrUnknownDictionary = errors.New("unknown dictionary")
+
+ // ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+ // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
+ // ErrCRCMismatch is returned if CRC mismatches.
+ ErrCRCMismatch = errors.New("CRC check failed")
+
+ // ErrDecoderClosed will be returned if the Decoder was used after
+ // Close has been called.
+ ErrDecoderClosed = errors.New("decoder used after Close")
+
+ // ErrDecoderNilInput is returned when a nil Reader was provided
+ // and an operation other than Reset/DecodeAll/Close was attempted.
+ ErrDecoderNilInput = errors.New("nil input provided as reader")
+)
+
+func println(a ...interface{}) {
+ if debug || debugDecoder || debugEncoder {
+ log.Println(a...)
+ }
+}
+
+func printf(format string, a ...interface{}) {
+ if debug || debugDecoder || debugEncoder {
+ log.Printf(format, a...)
+ }
+}
+
+func load3232(b []byte, i int32) uint32 {
+ return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
+}
+
+func load6432(b []byte, i int32) uint64 {
+ return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
+}
+
+type byter interface {
+ Bytes() []byte
+ Len() int
+}
+
+var _ byter = &bytes.Buffer{}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 000000000..9159de03e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 000000000..835ba3e75
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 000000000..ce9d7cded
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 000000000..54dfdcb12
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [Build Status](https://travis-ci.org/pkg/errors) [AppVeyor](https://ci.appveyor.com/project/davecheney/errors/branch/master) [GoDoc](http://godoc.org/github.com/pkg/errors) [Go Report Card](https://goreportcard.com/report/github.com/pkg/errors) [Sourcegraph](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 000000000..a932eade0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 000000000..161aea258
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 000000000..be0d10d0c
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 000000000..779a8348f
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (\n\t)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
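
Taken together, the three Go files above vendor the familiar `github.com/pkg/errors` surface: `Wrap`/`Wrapf` and `WithMessage`/`WithStack` for annotation, `Cause` plus the Go 1.13 `Is`/`As`/`Unwrap` shims for inspection, and `%+v` stack formatting. A minimal, self-contained sketch of that behaviour (the `readConfig` helper and the path are hypothetical, not code from this repository):

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical helper that wraps the underlying error with a
// message and a stack trace recorded at the call site of errors.Wrap.
func readConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return errors.Wrap(err, "read config failed")
	}
	return nil
}

func main() {
	err := readConfig("/nonexistent/config.json")

	// %v prints the message chain; %+v additionally prints the recorded stack.
	fmt.Printf("%v\n", err)
	fmt.Printf("%+v\n", err)

	// Cause unwraps through the causer chain to the original path error.
	fmt.Println("cause:", errors.Cause(err))

	// The go113.go shims delegate to the standard library, so errors.Is works.
	fmt.Println("not exist:", errors.Is(err, os.ErrNotExist))
}
```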
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
index dd878a30e..b9cc55abb 100644
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
new file mode 100644
index 000000000..65d761bc9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 000000000..8547c8dfd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ //  | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 000000000..2e45780b7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight and
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
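
This internal negotiation helper is what the updated promhttp handler (further down in this diff) uses to pick a response encoding from the client's `Accept-Encoding` header. Since the package sits under `internal/` in client_golang, the sketch below exercises it indirectly through the public `promhttp.HandlerOpts.OfferedCompressions` option; the registry and request values are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Offer only identity and gzip; zstd stays off even though the handler
	// now supports it, because only the offered list is negotiated.
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
	})

	req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
	req.Header.Set("Accept-Encoding", "zstd, gzip;q=0.5")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	// gzip is selected: zstd is not in the offer list, and gzip is the only
	// offered encoding that appears in the Accept-Encoding header.
	fmt.Println("Content-Encoding:", rec.Header().Get("Content-Encoding"))
}
```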
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index ad9a71a5e..520cbd7d4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -22,13 +22,13 @@ import (
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
-// populated using runtime/metrics.
+// populated using runtime/metrics. Those are the defaults we can't alter.
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
@@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
@@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
+ "Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
@@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
- "Total number of frees.",
+ "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
@@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
@@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
@@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
@@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
@@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
@@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
- "Number of allocated objects.",
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
@@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
@@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
@@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
@@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
@@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
@@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
@@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
@@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
@@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
@@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
@@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
"go_memstats_last_gc_time_seconds",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 2d8d9f64f..511746417 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -17,6 +17,7 @@
package prometheus
import (
+ "fmt"
"math"
"runtime"
"runtime/metrics"
@@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions {
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
},
RuntimeMetricRules: []internal.GoCollectorRule{
- //{Matcher: regexp.MustCompile("")},
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
},
}
}
@@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
+ help := attachOriginalName(d.Description.Description, d.Name)
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
@@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
- d.Description.Description,
+ help,
nil,
nil,
),
@@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
},
)
} else {
@@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
})
}
metricSet = append(metricSet, m)
@@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}
}
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
@@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
- panic("unexpected unsupported metric")
+ panic("unexpected bad kind metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
- panic("unexpected unsupported metric kind")
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
}
}
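
Because the default collector rule now matches GoCollectorDefaultRuntimeMetrics, a plain Go collector starts exporting the GOGC, GOMEMLIMIT and GOMAXPROCS runtime/metrics series in addition to the classic MemStats ones, and every runtime/metrics-derived help string gains the "Sourced from ..." suffix. A rough sketch of what that looks like from the consuming side (the port is arbitrary, not anything this repository configures):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// With this client_golang version, the default Go collector also exposes
	// the series derived from /gc/gogc:percent, /gc/gomemlimit:bytes and
	// /sched/gomaxprocs:threads, without any extra collector options.
	reg.MustRegister(collectors.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}
```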
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index b5c8bcb39..519db348a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -440,7 +440,7 @@ type HistogramOpts struct {
// constant (or any negative float value).
NativeHistogramZeroThreshold float64
- // The remaining fields define a strategy to limit the number of
+ // The next three fields define a strategy to limit the number of
// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
// at zero, the number of buckets is not limited. (Note that this might
// lead to unbounded memory consumption if the values observed by the
@@ -473,6 +473,22 @@ type HistogramOpts struct {
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
// now is for testing purposes, by default it's time.Now.
now func() time.Time
@@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
if opts.afterFunc == nil {
opts.afterFunc = time.AfterFunc
}
+
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -725,7 +743,8 @@ type histogram struct {
// resetScheduled is protected by mtx. It is true if a reset is
// scheduled for a later time (when nativeHistogramMinResetDuration has
// passed).
- resetScheduled bool
+ resetScheduled bool
+ nativeExemplars nativeExemplars
// now is for testing purposes, by default it's time.Now.
now func() time.Time
@@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error {
Length: proto.Uint32(0),
}}
}
+
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If histogram is native, the exemplar will be cached into nativeExemplars,
+// which has a limit, and will remove one exemplar when limit is reached.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1336,6 +1371,48 @@ func MustNewConstHistogram(
return m
}
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) {
atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ // Time-to-live for exemplars; it is set to -1 if exemplars are disabled, that is, NativeHistogramMaxExemplars is below 0.
+ // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration
+
+ exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ ttl = -1
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if !n.isEnabled() {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // When the number of exemplars is still below cap(n.exemplars),
+ // insert the new exemplar directly, keeping the
+ // slice sorted by exemplar value.
+ if len(n.exemplars) < cap(n.exemplars) {
+ var nIdx int
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ if len(n.exemplars) == 1 {
+ // When the number of exemplars is 1, then
+ // replace the existing exemplar with the new exemplar.
+ n.exemplars[0] = e
+ return
+ }
+ // From this point on, the number of exemplars is greater than 1.
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+ // The insertion point of the new exemplar in the exemplars slice after insertion.
+ // This is calculated purely based on the order of the exemplars by value.
+ // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+ nIdx = -1
+
+ // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+ // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+ // It is calculated in 3 steps:
+ // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+ // That is the following will be true (on log scale):
+ // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+ // the closest values to each other from all pairs.
+ // For example, suppose the values are distributed like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // Or like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+ // 3. We check if by inserting the new exemplar we would create a closer pair at
+ // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+ // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert the new exemplar.
+ if nIdx == -1 && *e.Value <= *exemplar.Value {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ // The closest exemplar pair is at index: i-1, i.
+ // Choose the exemplar with the older timestamp for replacement.
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ rIdx = i
+ } else {
+ rIdx = i - 1
+ }
+ }
+
+ }
+
+ // If all existing exemplars are smaller than the new exemplar,
+ // then the new exemplar should be inserted at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+ // Here, we have the following relationships:
+ // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+ // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ // If the oldest exemplar has expired, then replace it with the new exemplar.
+ rIdx = otIdx
+ } else {
+ // In the previous for loop, when calculating the closest pair of exemplars,
+ // we did not take into account the newly inserted exemplar.
+ // So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-n-----------x----------------x----x-----|
+ // nIdx-1--^ ^--new exemplar value
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ md = diff
+ rIdx = nIdx - 1
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-----------n-x----------------x----x-----|
+ // new exemplar value--^ ^--nIdx
+ // Do not make the spread worse, replace nIdx and not rIdx.
+ rIdx = nIdx
+ }
+ }
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
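
The new NativeHistogramMaxExemplars / NativeHistogramExemplarTTL knobs and the addExemplar bookkeeping above are easiest to see in a small configuration sketch; the metric name, label, and observed values below are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A native histogram keeping at most 3 exemplars. When a 4th arrives,
	// addExemplar evicts an exemplar older than the TTL if there is one,
	// otherwise the older exemplar of the closest pair on the log scale.
	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "demo_request_duration_seconds",
		Help:                        "Hypothetical request duration.",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: 3,
		NativeHistogramExemplarTTL:  2 * time.Minute,
	})

	for i, v := range []float64{0.01, 0.2, 0.21, 5.0} {
		reqDur.(prometheus.ExemplarObserver).ObserveWithExemplar(v, prometheus.Labels{
			"trace_id": fmt.Sprintf("trace-%d", i),
		})
	}

	// Three exemplars remain: 0.2 and 0.21 form the closest pair on the log
	// scale, so the older of the two (0.2) is dropped when 5.0 is observed.
	fmt.Println("observed 4 values, native exemplars capped at 3")
}
```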
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
index 723b45d64..a4fa6eabd 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
RuntimeMetricSumForHist map[string]string
RuntimeMetricRules []GoCollectorRule
}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index f018e5723..9d9b81ab4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
)
for i, e := range exemplars {
ts := e.Timestamp
- if ts == (time.Time{}) {
+ if ts.IsZero() {
ts = now
}
exs[i], err = newExemplar(e.Value, ts, e.Labels)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 8548dd18e..62a4e7ad9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -22,14 +22,15 @@ import (
)
type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
}
if opts.PidFn == nil {
@@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
}
// Collect returns the current state of all metrics of the collector.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 8c1136cee..14d56d2d0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
} else {
c.reportError(ch, nil, err)
}
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.IpExt.InOctets != nil {
+ inOctets = *netstat.IpExt.InOctets
+ }
+ if netstat.IpExt.OutOctets != nil {
+ outOctets = *netstat.IpExt.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
}
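
On procfs-backed platforms this means the standard process collector now also publishes process_network_receive_bytes_total and process_network_transmit_bytes_total whenever the process netstat data is readable. A minimal sketch of how a registry picks that up (nothing here is node-problem-detector code):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	mfs, err := reg.Gather()
	if err != nil {
		fmt.Println("gather error:", err)
		return
	}
	for _, mf := range mfs {
		// Alongside the existing process_* series, the two new network
		// counters appear here when netstat data is available.
		fmt.Println(mf.GetName())
	}
}
```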
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 9819917b8..315eab5f1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+ return r.ResponseWriter
+}
+
type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 09b8d2fbe..e598e66e6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -38,12 +38,13 @@ import (
"io"
"net/http"
"strconv"
- "strings"
"sync"
"time"
+ "github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
)
@@ -54,6 +55,18 @@ const (
processStartTimeHeader = "Process-Start-Time-Unix"
)
+// Compression represents the content encodings handlers support for the HTTP
+// responses.
+type Compression string
+
+const (
+ Identity Compression = "identity"
+ Gzip Compression = "gzip"
+ Zstd Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}
+ // Select compression formats to offer based on default or user choice.
+ var compressions []string
+ if !opts.DisableCompression {
+ offers := defaultCompressionFormats
+ if len(opts.OfferedCompressions) > 0 {
+ offers = opts.OfferedCompressions
+ }
+ for _, comp := range offers {
+ compressions = append(compressions, string(comp))
+ }
+ }
+
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
+ rsp.Header().Set(contentTypeHeader, string(contentType))
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
+ w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error getting writer", err)
+ }
+ w = io.Writer(rsp)
+ encodingHeader = string(Identity)
+ }
- gz.Reset(w)
- defer gz.Close()
+ defer closeWriter()
- w = gz
+ // Set Content-Encoding only when data is compressed
+ if encodingHeader != string(Identity) {
+ rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
-
enc := expfmt.NewEncoder(w, contentType)
// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
+ // DisableCompression disables the response encoding (compression) and
+ // encoding negotiation. If true, the handler will
+ // never compress the response, even if requested
+ // by the client and the OfferedCompressions field is set.
DisableCompression bool
+ // OfferedCompressions is a set of encodings (compressions) the handler
+ // will try to offer when negotiating with the client. This defaults to
+ // identity, gzip and zstd.
+ // NOTE: If the handler cannot agree with the client on an encoding, or if
+ // unsupported or empty encodings are set in OfferedCompressions, the
+ // handler always falls back to no compression (identity) for
+ // compatibility reasons. In such cases ErrorLog will be used if set.
+ OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -381,19 +418,6 @@ type HandlerOpts struct {
ProcessStartTime time.Time
}
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
-}
-
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+ if len(compressions) == 0 {
+ return rw, string(Identity), func() {}, nil
+ }
+
+ // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+ selected := httputil.NegotiateContentEncoding(r, compressions)
+
+ switch selected {
+ case "zstd":
+ // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+ z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+ if err != nil {
+ return nil, "", func() {}, err
+ }
+
+ z.Reset(rw)
+ return z, selected, func() { _ = z.Close() }, nil
+ case "gzip":
+ gz := gzipPool.Get().(*gzip.Writer)
+ gz.Reset(rw)
+ return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+ case "identity":
+ // This means the content is not compressed.
+ return rw, selected, func() {}, nil
+ default:
+ // The content encoding was not implemented yet.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+ }
+}
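
Taken together, the http.go changes replace the gzip-only branch with Accept-Encoding negotiation against an allow-list that now also offers zstd via klauspost/compress, falling back to identity when negotiation fails. A minimal usage sketch of the new HandlerOpts field, assuming your own registry:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()

        // Offer only gzip and identity; zstd is left out on purpose. If the
        // client asks for something else, the handler falls back to identity.
        handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            OfferedCompressions: []promhttp.Compression{
                promhttp.Identity,
                promhttp.Gzip,
            },
        })

        http.Handle("/metrics", handler)
        _ = http.ListenAndServe(":9100", nil)
    }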
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 5e2ced25a..c6fd2f58b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
+ continue
+ }
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
+ continue
}
+ newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 146270444..1ab0e4796 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -783,3 +783,45 @@ func MustNewConstSummary(
}
return m
}
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
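
The new constructors mirror NewConstSummary/MustNewConstSummary but attach a created timestamp, which exporters can surface (for example as an OpenMetrics _created line) to help detect resets. A hedged sketch of using it from a custom collector; the descriptor, values and timestamp are illustrative:

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    var (
        reqDesc = prometheus.NewDesc(
            "app_request_duration_seconds",
            "Request duration summary imported from an external system.",
            nil, nil,
        )
        // started stands in for whenever the external system began counting.
        started = time.Now().Add(-time.Hour)
    )

    type externalCollector struct{}

    func (externalCollector) Describe(ch chan<- *prometheus.Desc) { ch <- reqDesc }

    func (externalCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstSummaryWithCreatedTimestamp(
            reqDesc,
            1042,  // count
            123.4, // sum
            map[float64]float64{0.5: 0.1, 0.9: 0.3, 0.99: 1.2},
            started, // created timestamp
        )
    }

    func main() {
        prometheus.MustRegister(externalCollector{})
    }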
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 955cfd59f..2c808eece 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 25cfaa216..1448439b7 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
- return fmtUnknown
+ return FmtUnknown
}
const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return fmtUnknown
+ return FmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
- return fmtUnknown
+ return FmtUnknown
}
- return fmtProtoDelim
+ return FmtProtoDelim
case textType:
if v, ok := params["version"]; ok && v != TextVersion {
- return fmtUnknown
+ return FmtUnknown
}
- return fmtText
+ return FmtText
}
- return fmtUnknown
+ return FmtUnknown
}
// NewDecoder returns a new decoder based on the given input format.
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index ff5ef7a9d..d7f3d76f5 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ escapingScheme = Format("; escaping=" + escapeParam)
default:
// If the escaping parameter is unknown, ignore it.
}
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return fmtProtoDelim + escapingScheme
+ return FmtProtoDelim + escapingScheme
case "text":
- return fmtProtoText + escapingScheme
+ return FmtProtoText + escapingScheme
case "compact-text":
- return fmtProtoCompact + escapingScheme
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
}
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
- escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ escapingScheme = Format("; escaping=" + escapeParam)
default:
// If the escaping parameter is unknown, ignore it.
}
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return fmtProtoDelim + escapingScheme
+ return FmtProtoDelim + escapingScheme
case "text":
- return fmtProtoText + escapingScheme
+ return FmtProtoText + escapingScheme
case "compact-text":
- return fmtProtoCompact + escapingScheme
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
switch ver {
case OpenMetricsVersion_1_0_0:
- return fmtOpenMetrics_1_0_0 + escapingScheme
+ return FmtOpenMetrics_1_0_0 + escapingScheme
default:
- return fmtOpenMetrics_0_0_1 + escapingScheme
+ return FmtOpenMetrics_0_0_1 + escapingScheme
}
}
}
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
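
With the format constants exported again (but deprecated), the intended pattern is still to call Negotiate/NegotiateIncludingOpenMetrics and compare results via FormatType() rather than string equality. A short sketch under that assumption:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        h := http.Header{}
        h.Set("Accept", "application/openmetrics-text; version=1.0.0")

        format := expfmt.NegotiateIncludingOpenMetrics(h)

        // The Fmt* constants are exported but deprecated; the docs advise
        // comparing via FormatType() instead of direct string comparison.
        if format.FormatType() == expfmt.TypeOpenMetrics {
            fmt.Println("client negotiated OpenMetrics:", format)
        }
    }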
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 051b38cd1..b26886560 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -15,7 +15,7 @@
package expfmt
import (
- "fmt"
+ "errors"
"strings"
"github.com/prometheus/common/model"
@@ -32,24 +32,31 @@ type Format string
// it on the wire, new content-type strings will have to be agreed upon and
// added here.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
- // The Content-Type values for the different wire protocols. Note that these
- // values are now unexported. If code was relying on comparisons to these
- // constants, instead use FormatType().
- fmtUnknown Format = ``
- fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- fmtProtoDelim Format = protoFmt + ` encoding=delimited`
- fmtProtoText Format = protoFmt + ` encoding=text`
- fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
- fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
- fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+ // The Content-Type values for the different wire protocols. Do not do direct
+ // comparisons to these constants, instead use the comparison functions.
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+ FmtUnknown Format = ``
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
@@ -79,17 +86,17 @@ const (
func NewFormat(t FormatType) Format {
switch t {
case TypeProtoCompact:
- return fmtProtoCompact
+ return FmtProtoCompact
case TypeProtoDelim:
- return fmtProtoDelim
+ return FmtProtoDelim
case TypeProtoText:
- return fmtProtoText
+ return FmtProtoText
case TypeTextPlain:
- return fmtText
+ return FmtText
case TypeOpenMetrics:
- return fmtOpenMetrics_1_0_0
+ return FmtOpenMetrics_1_0_0
default:
- return fmtUnknown
+ return FmtUnknown
}
}
@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
// specified version number.
func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_0_0_1 {
- return fmtOpenMetrics_0_0_1, nil
+ return FmtOpenMetrics_0_0_1, nil
}
if version == OpenMetricsVersion_1_0_0 {
- return fmtOpenMetrics_1_0_0, nil
+ return FmtOpenMetrics_1_0_0, nil
}
- return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+ return FmtUnknown, errors.New("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+ var terms []string
+ for _, p := range strings.Split(string(f), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ trimmed := strings.TrimSpace(p)
+ if len(trimmed) > 0 {
+ terms = append(terms, trimmed)
+ }
+ continue
+ }
+ key := strings.TrimSpace(toks[0])
+ if key != model.EscapingKey {
+ terms = append(terms, strings.TrimSpace(p))
+ }
+ }
+ terms = append(terms, model.EscapingKey+"="+s.String())
+ return Format(strings.Join(terms, "; "))
}
// FormatType deduces an overall FormatType for the given format.
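
WithEscapingScheme gives callers a supported way to pin the escaping term on a Format built with NewFormat, replacing any escaping parameter already present. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/expfmt"
        "github.com/prometheus/common/model"
    )

    func main() {
        // Build a content type the non-deprecated way, then pin an escaping
        // scheme for scrapers that cannot handle UTF-8 metric names.
        f := expfmt.NewFormat(expfmt.TypeTextPlain)
        f = f.WithEscapingScheme(model.UnderscoreEscaping)

        // Prints something like:
        // text/plain; version=0.0.4; charset=utf-8; escaping=underscores
        fmt.Println(f)
    }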
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 353c5e93f..a21ed4ec1 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption)
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
// to include _created lines (See
-// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
// Created timestamps can improve the accuracy of series reset detection, but
// come with a bandwidth cost.
//
@@ -102,7 +102,7 @@ func WithUnit() EncoderOption {
//
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
// the unit has to be present in the metric name as its suffix:
-// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
// However, in order to accommodate any potential scenario where such a change in the
// metric name is not desirable, the users are here given the choice of either explicitly
// opt in, in case they wish for the unit to be included in the output AND in the metric name
@@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
compliantName = name[:len(name)-6]
}
- if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
- compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+ compliantName = compliantName + "_" + *in.Unit
}
// Comments, first HELP, then TYPE.
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f9b8265a9..4b86434b3 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if model.IsValidLegacyMetricName(name) {
return w.WriteString(name)
}
var written int
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 26490211a..b4607fe4d 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -22,9 +22,9 @@ import (
"math"
"strconv"
"strings"
+ "unicode/utf8"
dto "github.com/prometheus/client_model/go"
-
"google.golang.org/protobuf/proto"
"github.com/prometheus/common/model"
@@ -60,6 +60,7 @@ type TextParser struct {
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
+ currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -74,6 +75,9 @@ type TextParser struct {
// count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
+ // These indicate if the metric name from the current line being parsed is inside
+ // braces and if that metric name was found respectively.
+ currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
+ p.currentMF = nil
}
// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
+ p.currentMetricIsInsideBraces = false
+ p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil {
// This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done.
@@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
+ case '{':
+ p.currentMetricIsInsideBraces = true
+ return p.readingLabels
}
return p.readingMetricName
}
@@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ if p.currentMetricIsInsideBraces {
+ if p.currentMetricInsideBracesIsPresent {
+ p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ switch p.currentByte {
+ case ',':
+ p.setOrCreateCurrentMF()
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetricInsideBracesIsPresent = true
+ return p.startLabelName
+ case '}':
+ p.setOrCreateCurrentMF()
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+ return nil
+ }
+ }
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ p.currentLabelPairs = nil
+ return nil
+ }
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- return nil
+ p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
- for _, l := range p.currentMetric.Label {
+ for _, l := range p.currentLabelPairs {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+ p.currentLabelPairs = nil
return nil
}
}
@@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
} else {
@@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
return p.startLabelName
case '}':
+ if p.currentMF == nil {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
}
@@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
@@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset()
+ // A UTF-8 metric name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidMetricNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return
}
}
@@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
p.currentToken.Reset()
+ // A UTF-8 label name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidLabelNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
return
}
}
@@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ p.currentLabelPairs = nil
return
}
escaped = false
@@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
}
func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
}
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
}
func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':'
}
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameContinuation(b, quoted) || b == ':'
}
func isBlankOrTab(b byte) bool {
@@ -775,7 +895,7 @@ func histogramMetricName(name string) string {
func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
- return 0, fmt.Errorf("unsupported character in float")
+ return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
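
The parser changes add the quoted-name syntax used for UTF-8 metric and label names: the metric name may appear inside the braces, label pairs are buffered in currentLabelPairs until the closing brace, and \" becomes a valid escape. A hedged sketch of feeding such a line through TextParser (the sample exposition text is made up and should parse under the new rules):

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // A metric whose name and one label name are only valid under UTF-8
        // rules, written with the quoted syntax the parser now accepts.
        input := `{"my.metric", "host.name"="web-01", region="eu"} 42` + "\n"

        var parser expfmt.TextParser
        families, err := parser.TextToMetricFamilies(strings.NewReader(input))
        if err != nil {
            panic(err)
        }
        for name, mf := range families {
            // With no TYPE comment the family is untyped.
            fmt.Println(name, mf.GetMetric()[0].GetUntyped().GetValue())
        }
    }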
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 80d1fe944..460f554f2 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,6 +14,7 @@
package model
import (
+ "errors"
"fmt"
"time"
)
@@ -64,7 +65,7 @@ func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
@@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {
// Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
+ return errors.New("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %w", err)
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 3317ce22f..f4a387605 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
switch NameValidationScheme {
case LegacyValidation:
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
+ return ln.IsValidLegacy()
case UTF8Validation:
return utf8.ValidString(string(ln))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValidLegacy() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
return true
}
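
IsValid now delegates to the new IsValidLegacy helper when legacy validation is selected, so code that must stay on the old character set can call it directly regardless of the global scheme. A small sketch:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        name := model.LabelName("http.status_code")

        // Under the new default (UTF8Validation) this is a valid label name...
        fmt.Println("utf8 valid:", name.IsValid())

        // ...but it still fails the legacy [a-zA-Z_][a-zA-Z0-9_]* rules, which
        // is what IsValidLegacy checks independent of NameValidationScheme.
        fmt.Println("legacy valid:", name.IsValidLegacy())
    }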
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
index 481c47b46..abb2c9001 100644
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build go1.21
-
package model
import (
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
deleted file mode 100644
index c4212685e..000000000
--- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
- labelNames := make([]string, 0, len(l))
- for name := range l {
- labelNames = append(labelNames, string(name))
- }
- sort.Strings(labelNames)
- lstrs := make([]string, 0, len(l))
- for _, name := range labelNames {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
- }
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index eb865e5a5..a6b01755b 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,9 +14,11 @@
package model
import (
+ "errors"
"fmt"
"regexp"
"sort"
+ "strconv"
"strings"
"unicode/utf8"
@@ -25,19 +27,34 @@ import (
)
var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
- // in isolation from other components that don't support UTF-8 may result in
- // bugs or other undefined behavior. This value is intended to be set by
- // UTF-8-aware binaries as part of their startup. To avoid need for locking,
- // this value should be set once, ideally in an init(), before multiple
- // goroutines are started.
- NameValidationScheme = LegacyValidation
-
- // NameEscapingScheme defines the default way that names will be
- // escaped when presented to systems that do not support UTF-8 names. If the
- // Content-Type "escaping" term is specified, that will override this value.
- NameEscapingScheme = ValueEncodingEscaping
+ // NameValidationScheme determines the global default method of the name
+ // validation to be used by all calls to IsValidMetricName() and LabelName
+ // IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to the legacy name validation use
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable is here as an escape hatch for emergency cases,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+ // the change. In such a case, a temporary assignment to `LegacyValidation`
+ // value in the `init()` function in your main.go or so, could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+ // Labels structure or package might have been a better choice. Given the
+ // change was made and many have already upgraded common, we leave this as-is
+ // with this warning and learning for the future.
+ NameValidationScheme = UTF8Validation
+
+ // NameEscapingScheme defines the default way that names will be escaped when
+ // presented to systems that do not support UTF-8 names. If the Content-Type
+ // "escaping" term is specified, that will override this value.
+ // NameEscapingScheme should not be set to the NoEscaping value. That string
+ // is used in content negotiation to indicate that a system supports UTF-8 and
+ // has that feature enabled.
+ NameEscapingScheme = UnderscoreEscaping
)
// ValidationScheme is a Go enum for determining how metric and label names will
@@ -45,7 +62,7 @@ var (
type ValidationScheme int
const (
- // LegacyValidation is a setting that requirets that metric and label names
+ // LegacyValidation is a setting that requires that all metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
LegacyValidation ValidationScheme = iota
@@ -161,7 +178,7 @@ func (m Metric) FastFingerprint() Fingerprint {
func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme {
case LegacyValidation:
- return IsValidLegacyMetricName(n)
+ return IsValidLegacyMetricName(string(n))
case UTF8Validation:
if len(n) == 0 {
return false
@@ -176,7 +193,7 @@ func IsValidMetricName(n LabelValue) bool {
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n string) bool {
if len(n) == 0 {
return false
}
@@ -208,7 +225,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
}
// If the name is nil, copy as-is, don't try to escape.
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+ if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
out.Name = v.Name
} else {
out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -230,7 +247,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
for _, l := range m.Label {
if l.GetName() == MetricNameLabel {
- if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -240,7 +257,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
})
continue
}
- if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -256,20 +273,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label {
- if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
return true
}
- if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ if !IsValidLegacyMetricName(l.GetName()) {
return true
}
}
return false
}
-const (
- lowerhex = "0123456789abcdef"
-)
-
// EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a
@@ -283,7 +296,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
case NoEscaping:
return name
case UnderscoreEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
+ if IsValidLegacyMetricName(name) {
return name
}
for i, b := range name {
@@ -304,31 +317,25 @@ func EscapeName(name string, scheme EscapingScheme) string {
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
- escaped.WriteRune('_')
+ escaped.WriteString("__")
}
}
return escaped.String()
case ValueEncodingEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
+ if IsValidLegacyMetricName(name) {
return name
}
escaped.WriteString("U__")
for i, b := range name {
- if isValidLegacyRune(b, i) {
+ if b == '_' {
+ escaped.WriteString("__")
+ } else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_")
- } else if b < 0x100 {
- escaped.WriteRune('_')
- for s := 4; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
- escaped.WriteRune('_')
- } else if b < 0x10000 {
+ } else {
escaped.WriteRune('_')
- for s := 12; s >= 0; s -= 4 {
- escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
- }
+ escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteRune('_')
}
}
@@ -386,8 +393,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
- // This is too many characters for a utf8 value.
- if j > 4 {
+ // This is too many characters for a utf8 value based on the MaxRune
+ // value of '\U0010FFFF'.
+ if j >= 6 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
@@ -440,7 +448,7 @@ func (e EscapingScheme) String() string {
func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" {
- return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+ return NoEscaping, errors.New("got empty string instead of escaping scheme")
}
switch s {
case AllowUTF8:
@@ -452,6 +460,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
case EscapeValues:
return ValueEncodingEscaping, nil
default:
- return NoEscaping, fmt.Errorf("unknown format scheme " + s)
+ return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
}
}
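
The most consequential change in this file is the default NameValidationScheme flipping from LegacyValidation to UTF8Validation (and NameEscapingScheme to UnderscoreEscaping). The variable remains as a deprecated escape hatch; a hedged sketch of pinning the old behaviour during a migration, as the comment suggests:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func init() {
        // Temporary escape hatch while downstream components still reject
        // UTF-8 metric/label names. Remove once the migration is done.
        model.NameValidationScheme = model.LegacyValidation
    }

    func main() {
        fmt.Println(model.IsValidMetricName("my.metric")) // false under legacy rules
    }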
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 910b0b71f..8f91a9702 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"regexp"
"time"
@@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
}
if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
+ return errors.New("label name in matcher must not be empty")
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,7 +78,7 @@ type Silence struct {
// Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
+ return errors.New("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
@@ -85,22 +86,22 @@ func (s *Silence) Validate() error {
}
}
if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
+ return errors.New("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
+ return errors.New("creator information missing")
}
if s.Comment == "" {
- return fmt.Errorf("comment missing")
+ return errors.New("comment missing")
}
if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
+ return errors.New("creation timestamp missing")
}
return nil
}
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index ae35cc2ab..6bfc757d1 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"math"
"strconv"
@@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
+ return errors.New("sample value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 54bb038cf..895e6a3e8 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -15,6 +15,7 @@ package model
import (
"encoding/json"
+ "errors"
"fmt"
"strconv"
"strings"
@@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {
func (v *FloatString) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("float value must be a quoted string")
+ return errors.New("float value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
@@ -141,7 +142,7 @@ type SampleHistogramPair struct {
func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
if s.Histogram == nil {
- return nil, fmt.Errorf("histogram is nil")
+ return nil, errors.New("histogram is nil")
}
t, err := json.Marshal(s.Timestamp)
if err != nil {
@@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
}
if s.Histogram == nil {
- return fmt.Errorf("histogram is null")
+ return errors.New("histogram is null")
}
return nil
}
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67..b43e09f68 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -2,7 +2,10 @@
linters:
enable:
- errcheck
+ - forbidigo
- godot
+ - gofmt
+ - goimports
- gosimple
- govet
- ineffassign
@@ -12,11 +15,17 @@ linters:
- testifylint
- unused
-linter-settings:
+linters-settings:
+ forbidigo:
+ forbid:
+ - p: ^fmt\.Print.*$
+ msg: Do not commit print statements.
godot:
capital: true
exclude:
# Ignore "See: URL"
- 'See:'
+ goimports:
+ local-prefixes: github.com/prometheus/procfs
misspell:
locale: US
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 161729235..cbb5d8638 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v1.60.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
exit 1; \
fi
endef
+
+govulncheck: install-govulncheck
+ govulncheck ./...
+
+install-govulncheck:
+ command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 1224816c2..0718239cf 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash
rm -rf testdata/fixtures
make test
```
-Next, make the required changes to the extracted files in the `fixtures` directory. When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index cdcc8a7cc..2e5334415 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -23,9 +23,9 @@ import (
// Learned from include/uapi/linux/if_arp.h.
const (
- // completed entry (ha valid).
+ // Completed entry (ha valid).
ATFComplete = 0x02
- // permanent entry.
+ // Permanent entry.
ATFPermanent = 0x04
// Publish entry.
ATFPublish = 0x08
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 4980c875b..9bdaccc7c 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -24,8 +24,14 @@ type FS struct {
isReal bool
}
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+ // DefaultMountPoint is the common mount point of the proc filesystem.
+ DefaultMountPoint = fs.DefaultProcMountPoint
+
+ // SectorSize represents the size of a sector in bytes.
+ // It is specific to Linux block I/O operations.
+ SectorSize = 512
+)
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 134767d69..1b5bdbdf8 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -17,7 +17,7 @@
package procfs
// isRealProc returns true on architectures that don't have a Type argument
-// in their Statfs_t struct
-func isRealProc(mountPoint string) (bool, error) {
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
return true, nil
}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index cf2e3eaa0..7db863307 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
+ // Number of page stores canceled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
- // Number of async ops cancelled
+ // Number of async ops canceled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
- // Number of async ops initialised
+ // Number of async ops initialized
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 3c18c7610..3a43e8391 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -28,6 +28,9 @@ const (
// DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+ // DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+ DefaultSelinuxMountPoint = "/sys/fs/selinux"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 14272dc78..5a7d2df06 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,6 +14,7 @@
package util
import (
+ "errors"
"os"
"strconv"
"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
}
return &truth
}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ hexString := strings.TrimSpace(string(data))
+ if !strings.HasPrefix(hexString, "0x") {
+ return 0, errors.New("invalid format: hex string does not start with '0x'")
+ }
+ return strconv.ParseUint(hexString[2:], 16, 64)
+}
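
ReadHexFromFile parses proc/sys attributes written as 0x-prefixed hex: trim whitespace, require the 0x prefix, parse the rest base-16. Since it lives in procfs' internal/util package and cannot be imported from outside procfs, the sketch below re-implements the same logic standalone; the path is hypothetical:

    package main

    import (
        "errors"
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    // readHexFromFile mirrors the new internal/util helper: read a file whose
    // contents look like "0x1f\n" and parse the value as a uint64.
    func readHexFromFile(path string) (uint64, error) {
        data, err := os.ReadFile(path)
        if err != nil {
            return 0, err
        }
        s := strings.TrimSpace(string(data))
        if !strings.HasPrefix(s, "0x") {
            return 0, errors.New("invalid format: hex string does not start with '0x'")
        }
        return strconv.ParseUint(s[2:], 16, 64)
    }

    func main() {
        // Hypothetical sysfs attribute that uses the 0xXX format.
        v, err := readHexFromFile("/sys/class/example/device/flags")
        fmt.Println(v, err)
    }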
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 75a3b6c81..b6c8d1a57 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -45,11 +45,11 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
- // kernel version >= 4.14 MaxLen
+ // Kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
- // kernel version <= 4.2 MinLen
+ // Kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 000000000..f50b38e35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<pid>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<pid>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+ netDevSNMP6 := make(NetDevSNMP6)
+
+ // The net/dev_snmp6 folders contain one file per interface
+ ifaceFiles, err := os.ReadDir(dir)
+ if err != nil {
+ // On systems with IPv6 disabled, this directory won't exist.
+ // Do nothing.
+ if errors.Is(err, os.ErrNotExist) {
+ return netDevSNMP6, err
+ }
+ return netDevSNMP6, err
+ }
+
+ for _, iFaceFile := range ifaceFiles {
+ f, err := os.Open(dir + "/" + iFaceFile.Name())
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ defer f.Close()
+
+ netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ }
+
+ return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+ m := make(map[string]uint64)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ key, val := stat[0], stat[1]
+
+ // Expect stat name to contain "6" or be "ifIndex"
+ if strings.Contains(key, "6") || key == "ifIndex" {
+ v, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ m[key] = v
+ }
+ }
+ return m, scanner.Err()
+}
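
A brief usage sketch of the new per-interface IPv6 statistics API added above, assuming procfs is mounted at /proc and the vendored module version from this change; the stat name used in the lookup is just an example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// map[interface name]map[stat name]uint64, one entry per file in /proc/net/dev_snmp6/.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		log.Fatal(err)
	}
	for iface, counters := range stats {
		fmt.Println(iface, "Ip6InReceives:", counters["Ip6InReceives"])
	}
}
```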
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index b70f1fc7a..19e3378f7 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@ import (
)
const (
- // readLimit is used by io.LimitReader while reading the content of the
+ // Maximum size limit used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -50,12 +50,12 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
- // Drops shows the total number of dropped packets of all UPD sockets.
+ // Drops shows the total number of dropped packets of all UDP sockets.
Drops *uint64
}
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // A single line parser for fields from /proc/net/{t,u}dp{,6}.
+ // Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
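
As the corrected comment above notes, `Drops` is only populated for UDP sockets. A hedged usage sketch of the summary API that exposes it (assuming /proc is readable and the vendored procfs version from this change):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	summary, err := fs.NetUDPSummary() // parsed from /proc/net/udp
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("used sockets:", summary.UsedSockets)
	if summary.Drops != nil { // nil for TCP, non-nil for UDP
		fmt.Println("dropped packets:", *summary.Drops)
	}
}
```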
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 527762955..0396d7201 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@ type (
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
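
The functions deprecated above still work in this release; they are only flagged for eventual replacement by netlink-based socket diagnostics. A minimal sketch of the summary call, for reference (paths and field usage as in the vendored procfs API):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	s, err := fs.NetTCPSummary() // deprecated, but still parses /proc/net/tcp
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.UsedSockets, s.TxQueueLength, s.RxQueueLength)
}
```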
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index d868cebda..d7e0cacb4 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
return &nu, nil
}
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+ if l < minFields {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
}
// Field offsets are as follows:
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
}
// Path field is optional.
- if l > min {
+ if l > minFields {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index daeed7f57..4a64347c0 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -24,7 +24,7 @@ import (
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f34971..d15b66ddb 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
+ "cancelled_write_bytes: %d\n" //nolint:misspell
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 09060e820..9a297afcf 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -19,7 +19,6 @@ package procfs
import (
"bufio"
"errors"
- "fmt"
"os"
"regexp"
"strconv"
@@ -29,7 +28,7 @@ import (
)
var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
+ // Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
- fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index a055197c6..dd8aa5688 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
- s.NSpids = calcNSPidsList(vString)
+ nspids, err := calcNSPidsList(vString)
+ if err != nil {
+ return err
+ }
+ s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+ s := strings.Split(nspidsString, "\t")
var nspids []uint64
for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
+ nspid, err := strconv.ParseUint(nspid, 10, 64)
+ if err != nil {
+ return nil, err
}
nspids = append(nspids, nspid)
}
- return nspids
+ return nspids, nil
}
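
A standalone sketch (not part of the vendored code) of the new NSpid parsing behaviour shown above: the "NSpid:" value in /proc/<pid>/status is tab-separated, and malformed entries now surface an error instead of being silently dropped. The sample value is illustrative.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNSPids splits a tab-separated NSpid value and fails on any non-numeric field.
func parseNSPids(v string) ([]uint64, error) {
	var out []uint64
	for _, f := range strings.Split(v, "\t") {
		n, err := strconv.ParseUint(f, 10, 64)
		if err != nil {
			return nil, err
		}
		out = append(out, n)
	}
	return out, nil
}

func main() {
	// e.g. a process seen as PID 1234 on the host and PID 1 inside a PID namespace.
	fmt.Println(parseNSPids("1234\t1"))
}
```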
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go
deleted file mode 100644
index 2063b6f76..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
-
- "go.opencensus.io/trace"
- "google.golang.org/grpc/stats"
-)
-
-// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
-// traces. Use with gRPC clients only.
-type ClientHandler struct {
- // StartOptions allows configuring the StartOptions used to create new spans.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = c.traceTagRPC(ctx, rti)
- ctx = c.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
deleted file mode 100644
index fb3c19d6b..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ClientHandler:
-var (
- ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
- ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
- ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
- ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless)
- ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
-)
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ClientSentBytesPerRPCView = &view.View{
- Measure: ClientSentBytesPerRPC,
- Name: "grpc.io/client/sent_bytes_per_rpc",
- Description: "Distribution of bytes sent per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientReceivedBytesPerRPCView = &view.View{
- Measure: ClientReceivedBytesPerRPC,
- Name: "grpc.io/client/received_bytes_per_rpc",
- Description: "Distribution of bytes received per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientRoundtripLatencyView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/roundtrip_latency",
- Description: "Distribution of round-trip latency, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- // Purposely reuses the count from `ClientRoundtripLatency`, tagging
- // with method and status to result in ClientCompletedRpcs.
- ClientCompletedRPCsView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- Aggregation: view.Count(),
- }
-
- ClientStartedRPCsView = &view.View{
- Measure: ClientStartedRPCs,
- Name: "grpc.io/client/started_rpcs",
- Description: "Number of started client RPCs.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: view.Count(),
- }
-
- ClientSentMessagesPerRPCView = &view.View{
- Measure: ClientSentMessagesPerRPC,
- Name: "grpc.io/client/sent_messages_per_rpc",
- Description: "Distribution of sent messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientReceivedMessagesPerRPCView = &view.View{
- Measure: ClientReceivedMessagesPerRPC,
- Name: "grpc.io/client/received_messages_per_rpc",
- Description: "Distribution of received messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientServerLatencyView = &view.View{
- Measure: ClientServerLatency,
- Name: "grpc.io/client/server_latency",
- Description: "Distribution of server latency as viewed by client, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-var DefaultClientViews = []*view.View{
- ClientSentBytesPerRPCView,
- ClientReceivedBytesPerRPCView,
- ClientRoundtripLatencyView,
- ClientCompletedRPCsView,
-}
-
-// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
deleted file mode 100644
index b36349820..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "time"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the tag.Map populated by the application code, serializes
-// its tags into the GRPC metadata in order to be sent to the server.
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Info("clientHandler.TagRPC called with nil info.")
- }
- return ctx
- }
-
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- ts := tag.FromContext(ctx)
- if ts != nil {
- encoded := tag.Encode(ts)
- ctx = stats.SetTags(ctx, encoded)
- }
-
- return context.WithValue(ctx, rpcDataKey, d)
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
deleted file mode 100644
index 1370323fb..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ocgrpc contains OpenCensus stats and trace
-// integrations for gRPC.
-//
-// Use ServerHandler for servers and ClientHandler for clients.
-package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
deleted file mode 100644
index 8a53e0972..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
-
- "google.golang.org/grpc/stats"
-
- "go.opencensus.io/trace"
-)
-
-// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
-// traces. Use with gRPC servers.
-//
-// When installed (see Example), tracing metadata is read from inbound RPCs
-// by default. If no tracing metadata is present, or if the tracing metadata is
-// present but the SpanContext isn't sampled, then a new trace may be started
-// (as determined by Sampler).
-type ServerHandler struct {
- // IsPublicEndpoint may be set to true to always start a new trace around
- // each RPC. Any SpanContext in the RPC metadata will be added as a linked
- // span instead of making it the parent of the span created around the
- // server RPC.
- //
- // Be aware that if you leave this false (the default) on a public-facing
- // server, callers will be able to send tracing metadata in gRPC headers
- // and trigger traces in your backend.
- IsPublicEndpoint bool
-
- // StartOptions to use for to spans started around RPCs handled by this server.
- //
- // These will apply even if there is tracing metadata already
- // present on the inbound RPC but the SpanContext is not sampled. This
- // ensures that each service has some opportunity to be traced. If you would
- // like to not add any additional traces for this gRPC service, set:
- //
- // StartOptions.Sampler = trace.ProbabilitySampler(0.0)
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-var _ stats.Handler = (*ServerHandler)(nil)
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = s.traceTagRPC(ctx, rti)
- ctx = s.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
deleted file mode 100644
index fe0e97108..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ServerHandler:
-var (
- ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
- ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
- ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless)
- ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
-)
-
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ServerReceivedBytesPerRPCView = &view.View{
- Name: "grpc.io/server/received_bytes_per_rpc",
- Description: "Distribution of received bytes per RPC, by method.",
- Measure: ServerReceivedBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerSentBytesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_bytes_per_rpc",
- Description: "Distribution of total sent bytes per RPC, by method.",
- Measure: ServerSentBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "grpc.io/server/server_latency",
- Description: "Distribution of server latency in milliseconds, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerLatency,
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- // Purposely reuses the count from `ServerLatency`, tagging
- // with method and status to result in ServerCompletedRpcs.
- ServerCompletedRPCsView = &view.View{
- Name: "grpc.io/server/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-
- ServerStartedRPCsView = &view.View{
- Measure: ServerStartedRPCs,
- Name: "grpc.io/server/started_rpcs",
- Description: "Number of started server RPCs.",
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: view.Count(),
- }
-
- ServerReceivedMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/received_messages_per_rpc",
- Description: "Distribution of messages received count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerReceivedMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ServerSentMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_messages_per_rpc",
- Description: "Distribution of messages sent count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerSentMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-)
-
-// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{
- ServerReceivedBytesPerRPCView,
- ServerSentBytesPerRPCView,
- ServerLatencyView,
- ServerCompletedRPCsView,
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
deleted file mode 100644
index afcef023a..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "context"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
-// it and creates a new tag.Map and puts them into the returned context.
-func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("opencensus: TagRPC called with nil info.")
- }
- return ctx
- }
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- propagated := h.extractPropagatedTags(ctx)
- ctx = tag.NewContext(ctx, propagated)
- ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
- return context.WithValue(ctx, rpcDataKey, d)
-}
-
-// extractPropagatedTags creates a new tag map containing the tags extracted from the
-// gRPC metadata.
-func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
- buf := stats.Tags(ctx)
- if buf == nil {
- return nil
- }
- propagated, err := tag.Decode(buf)
- if err != nil {
- if grpclog.V(2) {
- grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
- }
- return nil
- }
- return propagated
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
deleted file mode 100644
index 9cb27320c..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- ocstats "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-type grpcInstrumentationKey string
-
-// rpcData holds the instrumentation RPC data that is needed between the start
-// and end of an call. It holds the info that this package needs to keep track
-// of between the various GRPC events.
-type rpcData struct {
- // reqCount and respCount has to be the first words
- // in order to be 64-aligned on 32-bit architectures.
- sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
-
- // startTime represents the time at which TagRPC was invoked at the
- // beginning of an RPC. It is an appoximation of the time when the
- // application code invoked GRPC code.
- startTime time.Time
- method string
-}
-
-// The following variables define the default hard-coded auxiliary data used by
-// both the default GRPC client and GRPC server metrics.
-var (
- DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
- DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
-)
-
-// Server tags are applied to the context used to process each RPC, as well as
-// the measures at the end of each RPC.
-var (
- KeyServerMethod = tag.MustNewKey("grpc_server_method")
- KeyServerStatus = tag.MustNewKey("grpc_server_status")
-)
-
-// Client tags are applied to measures at the end of each RPC.
-var (
- KeyClientMethod = tag.MustNewKey("grpc_client_method")
- KeyClientStatus = tag.MustNewKey("grpc_client_status")
-)
-
-var (
- rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-)
-
-func methodName(fullname string) string {
- return strings.TrimLeft(fullname, "/")
-}
-
-// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
- switch st := s.(type) {
- case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
- // do nothing for client
- case *stats.Begin:
- handleRPCBegin(ctx, st)
- case *stats.OutPayload:
- handleRPCOutPayload(ctx, st)
- case *stats.InPayload:
- handleRPCInPayload(ctx, st)
- case *stats.End:
- handleRPCEnd(ctx, st)
- default:
- grpclog.Infof("unexpected stats: %T", st)
- }
-}
-
-func handleRPCBegin(ctx context.Context, s *stats.Begin) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- }
-
- if s.IsClient() {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))),
- ocstats.WithMeasurements(ClientStartedRPCs.M(1)))
- } else {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))),
- ocstats.WithMeasurements(ServerStartedRPCs.M(1)))
- }
-}
-
-func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.sentBytes, int64(s.Length))
- atomic.AddInt64(&d.sentCount, 1)
-}
-
-func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.recvBytes, int64(s.Length))
- atomic.AddInt64(&d.recvCount, 1)
-}
-
-func handleRPCEnd(ctx context.Context, s *stats.End) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- elapsedTime := time.Since(d.startTime)
-
- var st string
- if s.Error != nil {
- s, ok := status.FromError(s.Error)
- if ok {
- st = statusCodeToString(s)
- }
- } else {
- st = "OK"
- }
-
- latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
- attachments := getSpanCtxAttachment(ctx)
- if s.Client {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyClientMethod, methodName(d.method)),
- tag.Upsert(KeyClientStatus, st)),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ClientRoundtripLatency.M(latencyMillis)))
- } else {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyServerStatus, st),
- ),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ServerLatency.M(latencyMillis)))
- }
-}
-
-func statusCodeToString(s *status.Status) string {
- // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
- switch c := s.Code(); c {
- case codes.OK:
- return "OK"
- case codes.Canceled:
- return "CANCELLED"
- case codes.Unknown:
- return "UNKNOWN"
- case codes.InvalidArgument:
- return "INVALID_ARGUMENT"
- case codes.DeadlineExceeded:
- return "DEADLINE_EXCEEDED"
- case codes.NotFound:
- return "NOT_FOUND"
- case codes.AlreadyExists:
- return "ALREADY_EXISTS"
- case codes.PermissionDenied:
- return "PERMISSION_DENIED"
- case codes.ResourceExhausted:
- return "RESOURCE_EXHAUSTED"
- case codes.FailedPrecondition:
- return "FAILED_PRECONDITION"
- case codes.Aborted:
- return "ABORTED"
- case codes.OutOfRange:
- return "OUT_OF_RANGE"
- case codes.Unimplemented:
- return "UNIMPLEMENTED"
- case codes.Internal:
- return "INTERNAL"
- case codes.Unavailable:
- return "UNAVAILABLE"
- case codes.DataLoss:
- return "DATA_LOSS"
- case codes.Unauthenticated:
- return "UNAUTHENTICATED"
- default:
- return "CODE_" + strconv.FormatInt(int64(c), 10)
- }
-}
-
-func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments {
- attachments := map[string]interface{}{}
- span := trace.FromContext(ctx)
- if span == nil {
- return attachments
- }
- spanCtx := span.SpanContext()
- if spanCtx.IsSampled() {
- attachments[metricdata.AttachmentKeySpanContext] = spanCtx
- }
- return attachments
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
deleted file mode 100644
index 61bc543d0..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
- "strings"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const traceContextKey = "grpc-trace-bin"
-
-// TagRPC creates a new trace span for the client side of the RPC.
-//
-// It returns ctx with the new trace span added and a serialization of the
-// SpanContext added to the outgoing gRPC metadata.
-func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSampler(c.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
- traceContextBinary := propagation.Binary(span.SpanContext())
- return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
-}
-
-// TagRPC creates a new trace span for the server side of the RPC.
-//
-// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
-// it finds one, uses that SpanContext as the parent context of the new span.
-//
-// It returns ctx, with the new trace span added.
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- md, _ := metadata.FromIncomingContext(ctx)
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- traceContext := md[traceContextKey]
- var (
- parent trace.SpanContext
- haveParent bool
- )
- if len(traceContext) > 0 {
- // Metadata with keys ending in -bin are actually binary. They are base64
- // encoded before being put on the wire, see:
- // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
- traceContextBinary := []byte(traceContext[0])
- parent, haveParent = propagation.FromBinary(traceContextBinary)
- if haveParent && !s.IsPublicEndpoint {
- ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler),
- )
- return ctx
- }
- }
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler))
- if haveParent {
- span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
- }
- return ctx
-}
-
-func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
- span := trace.FromContext(ctx)
- // TODO: compressed and uncompressed sizes are not populated in every message.
- switch rs := rs.(type) {
- case *stats.Begin:
- span.AddAttributes(
- trace.BoolAttribute("Client", rs.Client),
- trace.BoolAttribute("FailFast", rs.FailFast))
- case *stats.InPayload:
- span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
- case *stats.OutPayload:
- span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
- case *stats.End:
- if rs.Error != nil {
- s, ok := status.FromError(rs.Error)
- if ok {
- span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
- } else {
- span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
- }
- }
- span.End()
- }
-}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 000000000..773c9b643
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
+This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package needs to not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly to Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
new file mode 100644
index 000000000..088d19a6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
@@ -0,0 +1,15 @@
+# Versioning
+
+This document describes the versioning policy for this module.
+This policy is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used.
+ * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+ * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path.
+
+* GitHub releases will be made for all releases.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go
new file mode 100644
index 000000000..ad73d8cb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package sdk provides an auto-instrumentable OpenTelemetry SDK.
+
+An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
+process running this SDK. In that case, all telemetry the SDK produces will be
+processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
+
+By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
+auto-instrument the SDK, the SDK will not generate any telemetry.
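+
+A minimal usage sketch, assuming the package-level TracerProvider accessor
+defined elsewhere in this module:
+
+	tp := sdk.TracerProvider()
+	tracer := tp.Tracer("example")
+	_, span := tracer.Start(context.Background(), "operation")
+	span.End()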
+*/
+package sdk
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 000000000..af6ef171f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
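+//
+// For illustration, attributes can be built with the helper constructors
+// defined below (the keys and values here are only examples):
+//
+//	attrs := []Attr{
+//		String("service.name", "checkout"),
+//		Int("retry.count", 3),
+//		Bool("cache.hit", true),
+//	}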
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal returns if a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 000000000..949e2165c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 000000000..e854d7e84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID is all zeros.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID is all zeros.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 000000000..29e629d66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
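+//
+// For example, both encodings decode to the same value (a sketch using
+// encoding/json, which this file already imports):
+//
+//	var a, b protoInt64
+//	_ = json.Unmarshal([]byte(`"42"`), &a) // string form
+//	_ = json.Unmarshal([]byte(`42`), &b)   // number form
+//	// a == 42 && b == 42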
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 000000000..cecad8bae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
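+//
+// For example, OTLP/JSON of the following shape decodes into Attrs and
+// DroppedAttrs (the attribute key and value are illustrative):
+//
+//	{"attributes":[{"key":"host.name","value":{"stringValue":"node-1"}}],
+//	 "droppedAttributesCount":1}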
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
new file mode 100644
index 000000000..b6f2e28d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope holds the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
new file mode 100644
index 000000000..a13a6b733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -0,0 +1,456 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT),
+ EndTime: uint64(endT),
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.StartTime = time.Unix(0, int64(val.Uint64()))
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.EndTime = time.Unix(0, int64(val.Uint64()))
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
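+//
+// For example, the remote bits of a Span's Flags field (a uint32) can be
+// tested with the constants defined below:
+//
+//	known := flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0
+//	remote := flags&uint32(SpanFlagsContextIsRemoteMask) != 0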
+type SpanFlags int32
+
+const (
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask indicates the span or link is remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ SpanKindClient SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // Indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// Event is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t),
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ se.Time = time.Unix(0, int64(val.Uint64()))
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
new file mode 100644
index 000000000..1217776ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // The default status.
+ StatusCodeUnset StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // The Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return ""
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
new file mode 100644
index 000000000..69a348f0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically, when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 000000000..0dd01b063
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
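+//
+// A small round-trip sketch using the constructors and accessors defined in
+// this file:
+//
+//	v := Int64Value(42)
+//	_ = v.Kind()    // ValueKindInt64
+//	_ = v.AsInt64() // 42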
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+ // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+ // then the value of Value is in num as described above. Otherwise, it
+ // contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return fmt.Sprint(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return ""
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal is to have developers find this
+ // first if a ValueKind is not handled. It is preferable to have
+ // users open an issue asking why their attributes have an
+ // "unhandled: " prefix than to have their code panic.
+ return fmt.Sprintf("<unhandled: %s>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)})
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 000000000..86babf1a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "log/slog"
+ "os"
+ "strconv"
+)
+
+// maxSpan holds the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
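+//
+// For example (a sketch; the second key is hypothetical and unset):
+//
+//	_ = os.Setenv("OTEL_SPAN_EVENT_COUNT_LIMIT", "64")
+//	firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT") // 64
+//	firstEnv(128, "OTEL_EXAMPLE_UNSET_LIMIT")    // 128 (default)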
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ slog.Warn(
+ "invalid limit environment variable",
+ "error", err,
+ "key", key,
+ "value", strV,
+ )
+ }
+
+ return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 000000000..6ebea12a9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+ noop.Span
+
+ spanContext trace.SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *span) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *span) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ s.span.DroppedAttrs += uint32(len(attrs))
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ if limit == 0 {
+ return nil, uint32(len(attrs))
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ limit = min(len(attrs), limit)
+ return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+}
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
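+//
+// For example, per the rules above:
+//
+//	truncate(3, "abcdef")  // "abc"
+//	truncate(-1, "abcdef") // "abcdef", unchanged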
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
+
+func (s *span) End(opts ...trace.SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := trace.NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *span) AddEvent(name string, opts ...trace.EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
+
+func (s *span) AddLink(link trace.Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []trace.Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link trace.Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *span) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() }
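
A minimal sketch of how these span methods are exercised through the standard trace API; the tracer name and error value are illustrative:

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/auto/sdk"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := sdk.TracerProvider().Tracer("example")
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	// AddEvent and RecordError are dropped unless the span was sampled by an
	// attached instrumentation (see s.sampled above).
	span.AddEvent("cache miss")
	// WithStackTrace(true) adds the exception.stacktrace attribute captured via runtime.Stack.
	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
	_ = ctx
}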
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
new file mode 100644
index 000000000..cbcfabde3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -0,0 +1,124 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type tracer struct {
+ noop.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ trace.Tracer = tracer{}
+
+func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ var psc trace.SpanContext
+ sampled := true
+ span := new(span)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &span.spanContext)
+
+ span.sampled.Store(sampled)
+
+ ctx = trace.ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := trace.NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *tracer) start(
+ ctx context.Context,
+ spanPtr *span,
+ psc *trace.SpanContext,
+ sampled *bool,
+ sc *trace.SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
+
+func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ span.DroppedLinks = uint32(len(links))
+ } else {
+ if limit > 0 {
+ n := max(len(links)-limit, 0)
+ span.DroppedLinks = uint32(n)
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind trace.SpanKind) telemetry.SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case trace.SpanKindServer:
+ return telemetry.SpanKindServer
+ case trace.SpanKindClient:
+ return telemetry.SpanKindClient
+ case trace.SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case trace.SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
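
A hedged sketch of Start with common span-start options, indicating how they map onto the telemetry.Span fields assembled by traces above; names and values are illustrative:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/auto/sdk"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := sdk.TracerProvider().Tracer("example",
		trace.WithInstrumentationVersion("0.1.0"), // -> Scope.Version
	)
	_, span := tracer.Start(context.Background(), "handle-request",
		trace.WithSpanKind(trace.SpanKindServer),         // -> Kind
		trace.WithAttributes(attribute.Bool("ok", true)), // -> Attrs (capped by maxSpan.Attrs)
		trace.WithTimestamp(time.Now()),                  // -> StartTime
	)
	defer span.End()
}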
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
new file mode 100644
index 000000000..dbc477a59
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+// TracerProvider returns an auto-instrumentable [trace.TracerProvider].
+//
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func TracerProvider() trace.TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(tracerProvider)
+
+type tracerProvider struct{ noop.TracerProvider }
+
+var _ trace.TracerProvider = tracerProvider{}
+
+func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ cfg := trace.NewTracerConfig(opts...)
+ return tracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
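
A short sketch of wiring this provider in as the global one; until an eBPF-based auto.Instrumentation attaches, it behaves as a no-op:

package main

import (
	"go.opentelemetry.io/auto/sdk"
	"go.opentelemetry.io/otel"
)

func main() {
	// Register the auto-instrumentable provider globally so libraries that call
	// otel.Tracer(...) pick it up.
	otel.SetTracerProvider(sdk.TracerProvider())
	_ = otel.Tracer("example")
}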
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index 06282ce79..9e87fb4bb 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -1,20 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
+ "google.golang.org/grpc/stats"
+
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
@@ -31,18 +22,28 @@ const (
GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
)
-// Filter is a predicate used to determine whether a given request in
-// interceptor info should be traced. A Filter must return true if
+// InterceptorFilter is a predicate used to determine whether a given request in
+// interceptor info should be instrumented. An InterceptorFilter must return true if
// the request should be traced.
-type Filter func(*InterceptorInfo) bool
+//
+// Deprecated: Use stats handlers instead.
+type InterceptorFilter func(*InterceptorInfo) bool
+
+// Filter is a predicate used to determine whether a given request, described
+// by the attached RPC tag info, should be instrumented.
+// A Filter must return true if the request should be instrumented.
+type Filter func(*stats.RPCTagInfo) bool
// config is a group of options for this instrumentation.
type config struct {
- Filter Filter
- Propagators propagation.TextMapPropagator
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
- SpanStartOptions []trace.SpanStartOption
+ Filter Filter
+ InterceptorFilter InterceptorFilter
+ Propagators propagation.TextMapPropagator
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ SpanStartOptions []trace.SpanStartOption
+ SpanAttributes []attribute.KeyValue
+ MetricAttributes []attribute.KeyValue
ReceivedEvent bool
SentEvent bool
@@ -50,11 +51,11 @@ type config struct {
tracer trace.Tracer
meter metric.Meter
- rpcDuration metric.Float64Histogram
- rpcRequestSize metric.Int64Histogram
- rpcResponseSize metric.Int64Histogram
- rpcRequestsPerRPC metric.Int64Histogram
- rpcResponsesPerRPC metric.Int64Histogram
+ rpcDuration metric.Float64Histogram
+ rpcInBytes metric.Int64Histogram
+ rpcOutBytes metric.Int64Histogram
+ rpcInMessages metric.Int64Histogram
+ rpcOutMessages metric.Int64Histogram
}
// Option applies an option value for a config.
@@ -95,46 +96,64 @@ func newConfig(opts []Option, role string) *config {
}
}
- c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
+ rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size",
metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
- if c.rpcRequestSize == nil {
- c.rpcRequestSize = noop.Int64Histogram{}
+ if rpcRequestSize == nil {
+ rpcRequestSize = noop.Int64Histogram{}
}
}
- c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
+ rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size",
metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
metric.WithUnit("By"))
if err != nil {
otel.Handle(err)
- if c.rpcResponseSize == nil {
- c.rpcResponseSize = noop.Int64Histogram{}
+ if rpcResponseSize == nil {
+ rpcResponseSize = noop.Int64Histogram{}
}
}
- c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+ rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
- if c.rpcRequestsPerRPC == nil {
- c.rpcRequestsPerRPC = noop.Int64Histogram{}
+ if rpcRequestsPerRPC == nil {
+ rpcRequestsPerRPC = noop.Int64Histogram{}
}
}
- c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+ rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
metric.WithUnit("{count}"))
if err != nil {
otel.Handle(err)
- if c.rpcResponsesPerRPC == nil {
- c.rpcResponsesPerRPC = noop.Int64Histogram{}
+ if rpcResponsesPerRPC == nil {
+ rpcResponsesPerRPC = noop.Int64Histogram{}
}
}
+ switch role {
+ case "client":
+ c.rpcInBytes = rpcResponseSize
+ c.rpcInMessages = rpcResponsesPerRPC
+ c.rpcOutBytes = rpcRequestSize
+ c.rpcOutMessages = rpcRequestsPerRPC
+ case "server":
+ c.rpcInBytes = rpcRequestSize
+ c.rpcInMessages = rpcRequestsPerRPC
+ c.rpcOutBytes = rpcResponseSize
+ c.rpcOutMessages = rpcResponsesPerRPC
+ default:
+ c.rpcInBytes = noop.Int64Histogram{}
+ c.rpcInMessages = noop.Int64Histogram{}
+ c.rpcOutBytes = noop.Int64Histogram{}
+ c.rpcOutMessages = noop.Int64Histogram{}
+ }
+
return c
}
@@ -163,15 +182,30 @@ func (o tracerProviderOption) apply(c *config) {
// WithInterceptorFilter returns an Option to use the request filter.
//
// Deprecated: Use stats handlers instead.
-func WithInterceptorFilter(f Filter) Option {
+func WithInterceptorFilter(f InterceptorFilter) Option {
return interceptorFilterOption{f: f}
}
type interceptorFilterOption struct {
- f Filter
+ f InterceptorFilter
}
func (o interceptorFilterOption) apply(c *config) {
+ if o.f != nil {
+ c.InterceptorFilter = o.f
+ }
+}
+
+// WithFilter returns an Option to use the request filter.
+func WithFilter(f Filter) Option {
+ return filterOption{f: f}
+}
+
+type filterOption struct {
+ f Filter
+}
+
+func (o filterOption) apply(c *config) {
if o.f != nil {
c.Filter = o.f
}
@@ -243,3 +277,29 @@ func (o spanStartOption) apply(c *config) {
func WithSpanOptions(opts ...trace.SpanStartOption) Option {
return spanStartOption{opts}
}
+
+type spanAttributesOption struct{ a []attribute.KeyValue }
+
+func (o spanAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.SpanAttributes = o.a
+ }
+}
+
+// WithSpanAttributes returns an Option to add custom attributes to the spans.
+func WithSpanAttributes(a ...attribute.KeyValue) Option {
+ return spanAttributesOption{a: a}
+}
+
+type metricAttributesOption struct{ a []attribute.KeyValue }
+
+func (o metricAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.MetricAttributes = o.a
+ }
+}
+
+// WithMetricAttributes returns an Option to add custom attributes to the metrics.
+func WithMetricAttributes(a ...attribute.KeyValue) Option {
+ return metricAttributesOption{a: a}
+}
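
A hedged sketch of the options added in this hunk used with the stats-handler API; the health-check method name and attribute values are illustrative, and service registration is omitted:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	srv := grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler(
			// Filter (unlike the deprecated InterceptorFilter) is evaluated by the
			// stats handler; returning false skips recording for the RPC.
			otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
				return info.FullMethodName != "/grpc.health.v1.Health/Check"
			}),
			otelgrpc.WithSpanAttributes(attribute.String("service.tier", "backend")),
			otelgrpc.WithMetricAttributes(attribute.String("region", "us-central1")),
		)),
	)
	// Register services and call srv.Serve(lis) as usual.
	_ = srv
}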
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
index 958dcd87a..b8b836b00 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
index 3b487a936..7d5ed0580 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
@@ -18,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
import (
"context"
+ "errors"
"io"
"net"
"strconv"
@@ -59,7 +49,7 @@ var (
)
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
-// for use in a grpc.Dial call.
+// for use in a grpc.NewClient call.
//
// Deprecated: Use [NewClientHandler] instead.
func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
@@ -81,7 +71,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
Method: method,
Type: UnaryClient,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return invoker(ctx, method, req, reply, cc, callOpts...)
}
@@ -147,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error {
if err == nil && !w.desc.ServerStreams {
w.endSpan(nil)
- } else if err == io.EOF {
+ } else if errors.Is(err, io.EOF) {
w.endSpan(nil)
} else if err != nil {
w.endSpan(err)
@@ -196,7 +186,7 @@ func (w *clientStream) CloseSend() error {
return err
}
-func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
+func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
return &clientStream{
ClientStream: s,
span: span,
@@ -219,7 +209,7 @@ func (w *clientStream) endSpan(err error) {
}
// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
-// for use in a grpc.Dial call.
+// for use in a grpc.NewClient call.
//
// Deprecated: Use [NewClientHandler] instead.
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
@@ -241,7 +231,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
Method: method,
Type: StreamClient,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return streamer(ctx, desc, cc, method, callOpts...)
}
@@ -270,7 +260,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
span.End()
return s, err
}
- stream := wrapClientStream(ctx, s, desc, span, cfg)
+ stream := wrapClientStream(s, desc, span, cfg)
return stream, nil
}
}
@@ -296,7 +286,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
UnaryServerInfo: info,
Type: UnaryServer,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return handler(ctx, req)
}
@@ -344,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
- cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+ cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
return resp, err
}
@@ -422,7 +412,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
StreamServerInfo: info,
Type: StreamServer,
}
- if cfg.Filter != nil && !cfg.Filter(i) {
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
return handler(srv, wrapServerStream(ctx, ss, cfg))
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
index f6116946b..b62f7cd7c 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
index cf32a9e97..bef07b7a3 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
index f585fb6ae..3aa37915d 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
index b65fab308..409c621b7 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
index 73d2b8b6b..c01cb897c 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
@@ -24,20 +13,22 @@ import (
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
)
type gRPCContextKey struct{}
type gRPCContext struct {
- messagesReceived int64
- messagesSent int64
- metricAttrs []attribute.KeyValue
+ inMessages int64
+ outMessages int64
+ metricAttrs []attribute.KeyValue
+ record bool
}
type serverHandler struct {
@@ -72,11 +63,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attrs...),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
)
gctx := gRPCContext{
- metricAttrs: attrs,
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
}
return context.WithValue(ctx, gRPCContextKey{}, &gctx)
}
@@ -108,11 +103,15 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
ctx,
name,
trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attrs...),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
)
gctx := gRPCContext{
- metricAttrs: attrs,
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
}
return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
@@ -141,6 +140,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool
gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
if gctx != nil {
+ if !gctx.record {
+ return
+ }
metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
metricAttrs = append(metricAttrs, gctx.metricAttrs...)
}
@@ -149,8 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool
case *stats.Begin:
case *stats.InPayload:
if gctx != nil {
- messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
- c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ messageId = atomic.AddInt64(&gctx.inMessages, 1)
+ c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
}
if c.ReceivedEvent {
@@ -165,8 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool
}
case *stats.OutPayload:
if gctx != nil {
- messageId = atomic.AddInt64(&gctx.messagesSent, 1)
- c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
+ messageId = atomic.AddInt64(&gctx.outMessages, 1)
+ c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
}
if c.SentEvent {
@@ -203,14 +205,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool
span.End()
metricAttrs = append(metricAttrs, rpcStatusAttr)
+ // Allocate vararg slice once.
+ recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))}
// Use floating point division here for higher precision (instead of Millisecond method).
+ // Measure right before calling Record() to capture as much elapsed time as possible.
elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
- c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
+ c.rpcDuration.Record(ctx, elapsedTime, recordOpts...)
if gctx != nil {
- c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...))
- c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...))
+ c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...)
+ c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...)
}
default:
return
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
index d633c4bef..80e5f2f6f 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -1,22 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// Version is the current release version of the gRPC instrumentation.
func Version() string {
- return "0.49.0"
+ return "0.59.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
index 92b8cf73c..b25641c55 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
@@ -23,13 +12,13 @@ import (
)
// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
-// Please be careful of intitialization order - for example, if you change
+// Please be careful of initialization order - for example, if you change
// the global propagator, the DefaultClient might still be using the old one.
var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
// Get is a convenient replacement for http.Get that adds a span around the request.
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
if err != nil {
return nil, err
}
@@ -38,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error)
// Head is a convenient replacement for http.Head that adds a span around the request.
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
if err != nil {
return nil, err
}
@@ -47,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error
// Post is a convenient replacement for http.Post that adds a span around the request.
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body)
if err != nil {
return nil, err
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
index cabf645a5..a83a02627 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
@@ -29,20 +18,6 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
- serverResponseSize = "http.server.response.size" // Incoming response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-// Client HTTP metrics.
-const (
- clientRequestSize = "http.client.request.size" // Outgoing request bytes total
- clientResponseSize = "http.client.response.size" // Outgoing response bytes total
- clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
-)
-
// Filter is a predicate used to determine whether a given http.request should
// be traced. A Filter must return true if the request should be traced.
type Filter func(*http.Request) bool
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
index a1b5b5e5a..a01bfafbe 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
@@ -19,6 +8,8 @@ import (
"net/http"
"net/http/httptrace"
+ "go.opentelemetry.io/otel/attribute"
+
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
@@ -44,8 +35,9 @@ type config struct {
SpanNameFormatter func(string, *http.Request) string
ClientTrace func(context.Context) *httptrace.ClientTrace
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ MetricAttributesFn func(*http.Request) []attribute.KeyValue
}
// Option interface used for setting optional config properties.
@@ -111,7 +103,7 @@ func WithPublicEndpoint() Option {
})
}
-// WithPublicEndpointFn runs with every request, and allows conditionnally
+// WithPublicEndpointFn runs with every request, and allows conditionally
// configuring the Handler to link the span with an incoming span context. If
// this option is not provided or returns false, then the association is a
// child association instead of a link.
@@ -205,3 +197,11 @@ func WithServerName(server string) Option {
c.ServerName = server
})
}
+
+// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
+// These attributes will be included in metrics for every request.
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
+ return optionFunc(func(c *config) {
+ c.MetricAttributesFn = metricAttributesFn
+ })
+}
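
A minimal sketch of the new WithMetricAttributesFn option; the tenant header and handler body are illustrative:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	h := otelhttp.NewHandler(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		"hello",
		// Attach a per-request attribute to every metric recorded for the request.
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{attribute.String("tenant", r.Header.Get("X-Tenant"))}
		}),
	)
	_ = http.ListenAndServe(":8080", h)
}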
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
index 38c7f01c7..56b24b982 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package otelhttp provides an http.Handler and functions that are intended
// to be used to add tracing by wrapping existing handlers (with Handler) and
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index 1fc15019e..3ea05d019 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -1,32 +1,19 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
- "io"
"net/http"
"time"
"github.com/felixge/httpsnoop"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
)
@@ -35,20 +22,18 @@ type middleware struct {
operation string
server string
- tracer trace.Tracer
- meter metric.Meter
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- readEvent bool
- writeEvent bool
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- publicEndpoint bool
- publicEndpointFn func(*http.Request) bool
-
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+ metricAttributesFn func(*http.Request) []attribute.KeyValue
+
+ semconv semconv.HTTPServer
}
func defaultHandlerFormatter(operation string, _ *http.Request) string {
@@ -76,7 +61,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
c := newConfig(append(defaultOpts, opts...)...)
h.configure(c)
- h.createMeasures()
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -87,7 +71,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
func (h *middleware) configure(c *config) {
h.tracer = c.Tracer
- h.meter = c.Meter
h.propagators = c.Propagators
h.spanStartOptions = c.SpanStartOptions
h.readEvent = c.ReadEvent
@@ -97,36 +80,8 @@ func (h *middleware) configure(c *config) {
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
h.server = c.ServerName
-}
-
-func handleErr(err error) {
- if err != nil {
- otel.Handle(err)
- }
-}
-
-func (h *middleware) createMeasures() {
- var err error
- h.requestBytesCounter, err = h.meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- h.responseBytesCounter, err = h.meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- h.serverLatencyMeasure, err = h.meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
+ h.semconv = semconv.NewHTTPServer(c.Meter)
+ h.metricAttributesFn = c.MetricAttributesFn
}
// serveHTTP sets up tracing and calls the given next http.Handler with the span
@@ -143,12 +98,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
opts := []trace.SpanStartOption{
- trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...),
- }
- if h.server != "" {
- hostAttr := semconv.NetHostName(h.server)
- opts = append(opts, trace.WithAttributes(hostAttr))
+ trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
}
+
opts = append(opts, h.spanStartOptions...)
if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
opts = append(opts, trace.WithNewRoot())
@@ -168,6 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
+ if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
+ opts = append(opts, trace.WithTimestamp(startTime))
+ requestStartTime = startTime
+ }
+
ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
defer span.End()
@@ -178,14 +135,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
- var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc)
if r.Body != nil && r.Body != http.NoBody {
- bw.ReadCloser = r.Body
- bw.record = readRecordFunc
- r.Body = &bw
+ r.Body = bw
}
writeRecordFunc := func(int64) {}
@@ -195,13 +150,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
- rww := &respWriterWrapper{
- ResponseWriter: w,
- record: writeRecordFunc,
- ctx: ctx,
- props: h.propagators,
- statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
- }
+ rww := request.NewRespWriterWrapper(w, writeRecordFunc)
// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@@ -217,61 +166,62 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
return rww.WriteHeader
},
+ Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
+ return rww.Flush
+ },
})
- labeler := &Labeler{}
- ctx = injectLabeler(ctx, labeler)
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
next.ServeHTTP(w, r.WithContext(ctx))
- setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err)
-
- // Add metrics
- attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
- if rww.statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
- }
- o := metric.WithAttributes(attributes...)
- h.requestBytesCounter.Add(ctx, bw.read.Load(), o)
- h.responseBytesCounter.Add(ctx, rww.written, o)
+ statusCode := rww.StatusCode()
+ bytesWritten := rww.BytesWritten()
+ span.SetStatus(h.semconv.Status(statusCode))
+ span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
+ StatusCode: statusCode,
+ ReadBytes: bw.BytesRead(),
+ ReadError: bw.Error(),
+ WriteBytes: bytesWritten,
+ WriteError: rww.Error(),
+ })...)
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
- h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
-}
-
-func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
- attributes := []attribute.KeyValue{}
-
- // TODO: Consider adding an event after each read and write, possibly as an
- // option (defaulting to off), so as to not create needlessly verbose spans.
- if read > 0 {
- attributes = append(attributes, ReadBytesKey.Int64(read))
- }
- if rerr != nil && rerr != io.EOF {
- attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
- }
- if wrote > 0 {
- attributes = append(attributes, WroteBytesKey.Int64(wrote))
+ metricAttributes := semconv.MetricAttributes{
+ Req: r,
+ StatusCode: statusCode,
+ AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...),
}
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- span.SetStatus(semconvutil.HTTPServerStatus(statusCode))
- if werr != nil && werr != io.EOF {
- attributes = append(attributes, WriteErrorKey.String(werr.Error()))
+ h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
+ ServerName: h.server,
+ ResponseSize: bytesWritten,
+ MetricAttributes: metricAttributes,
+ MetricData: semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ ElapsedTime: elapsedTime,
+ },
+ })
+}
+
+func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+ var attributeForRequest []attribute.KeyValue
+ if h.metricAttributesFn != nil {
+ attributeForRequest = h.metricAttributesFn(r)
}
- span.SetAttributes(attributes...)
+ return attributeForRequest
}
// WithRouteTag annotates spans and metrics with the provided route name
// with HTTP route attribute.
func WithRouteTag(route string, h http.Handler) http.Handler {
+ attr := semconv.NewHTTPServer(nil).Route(route)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- attr := semconv.HTTPRouteKey.String(route)
-
span := trace.SpanFromContext(r.Context())
span.SetAttributes(attr)
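
A short sketch combining NewHandler with WithRouteTag, whose route attribute now comes from the internal semconv package; the mux and route pattern are illustrative:

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			_, _ = w.Write([]byte("ok"))
		}),
	))
	// NewHandler starts a server span per request and records the semconv metrics.
	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "http.server"))
}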
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
new file mode 100644
index 000000000..a945f5566
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "io"
+ "sync"
+)
+
+var _ io.ReadCloser = &BodyWrapper{}
+
+// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type BodyWrapper struct {
+ io.ReadCloser
+ OnRead func(n int64) // must not be nil
+
+ mu sync.Mutex
+ read int64
+ err error
+}
+
+// NewBodyWrapper creates a new BodyWrapper.
+//
+// The onRead attribute is a callback that will be called every time the data
+// is read, with the number of bytes being read.
+func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
+ return &BodyWrapper{
+ ReadCloser: body,
+ OnRead: onRead,
+ }
+}
+
+// Read reads the data from the io.ReadCloser, and stores the number of bytes
+// read and the error.
+func (w *BodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+
+ w.updateReadData(n1, err)
+ w.OnRead(n1)
+ return n, err
+}
+
+func (w *BodyWrapper) updateReadData(n int64, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.read += n
+ if err != nil {
+ w.err = err
+ }
+}
+
+// Close closes the io.ReadCloser.
+func (w *BodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+// BytesRead returns the number of bytes read up to this point.
+func (w *BodyWrapper) BytesRead() int64 {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.read
+}
+
+// Error returns the last error.
+func (w *BodyWrapper) Error() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.err
+}
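
The request package is internal and cannot be imported by user code; a standalone sketch of the same counting-ReadCloser pattern that BodyWrapper implements:

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingBody mirrors BodyWrapper: it forwards reads and reports byte counts.
type countingBody struct {
	io.ReadCloser
	onRead func(int64)
}

func (c countingBody) Read(p []byte) (int, error) {
	n, err := c.ReadCloser.Read(p)
	c.onRead(int64(n))
	return n, err
}

func main() {
	var total int64
	body := countingBody{io.NopCloser(strings.NewReader("payload")), func(n int64) { total += n }}
	_, _ = io.Copy(io.Discard, body)
	fmt.Println("bytes read:", total) // 7
}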
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
new file mode 100644
index 000000000..fbc344cbd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -0,0 +1,119 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+
+import (
+ "net/http"
+ "sync"
+)
+
+var _ http.ResponseWriter = &RespWriterWrapper{}
+
+// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
+// that may be useful when using it in real life situations.
+type RespWriterWrapper struct {
+ http.ResponseWriter
+ OnWrite func(n int64) // must not be nil
+
+ mu sync.RWMutex
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+// NewRespWriterWrapper creates a new RespWriterWrapper.
+//
+// The onWrite attribute is a callback that will be called every time the data
+// is written, with the number of bytes that were written.
+func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
+ return &RespWriterWrapper{
+ ResponseWriter: w,
+ OnWrite: onWrite,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+}
+
+// Write writes the bytes array into the [ResponseWriter], and tracks the
+// number of bytes written and last error.
+func (w *RespWriterWrapper) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
+
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.OnWrite(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader are propagated to the underlying ResponseWriter,
+// but only the statusCode from the first call is persisted.
+// Consecutive calls are deliberately not blocked: doing so would alter expected
+// behavior and suppress the net/http warning logs that let developers notice
+// incorrect handler implementations.
+func (w *RespWriterWrapper) WriteHeader(statusCode int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.writeHeader(statusCode)
+}
+
+// writeHeader persists the status code for span attribution, and propagates
+// the call to the underlying ResponseWriter.
+// It does not acquire a lock, and therefore assumes that locking is handled by
+// a parent method.
+func (w *RespWriterWrapper) writeHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
+
+// Flush implements [http.Flusher].
+func (w *RespWriterWrapper) Flush() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
+
+ if f, ok := w.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// BytesWritten returns the number of bytes written.
+func (w *RespWriterWrapper) BytesWritten() int64 {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.written
+}
+
+// StatusCode returns the HTTP status code that was sent.
+func (w *RespWriterWrapper) StatusCode() int {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.statusCode
+}
+
+// Error returns the last error.
+func (w *RespWriterWrapper) Error() error {
+ w.mu.RLock()
+ defer w.mu.RUnlock()
+
+ return w.err
+}
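For reviewers unfamiliar with the pattern, the wrapper above is the standard "decorate http.ResponseWriter" technique; a minimal standalone sketch of the same idea follows (hypothetical names, not the vendored internal type, which cannot be imported from outside otelhttp):

```go
// Illustrative only: a tiny response-writer wrapper in the same spirit as
// RespWriterWrapper above. It keeps the first status code and counts bytes.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type countingWriter struct {
	http.ResponseWriter
	status  int
	written int64
}

func (w *countingWriter) WriteHeader(code int) {
	if w.status == 0 {
		w.status = code // remember only the first status code, like the wrapper above
	}
	w.ResponseWriter.WriteHeader(code)
}

func (w *countingWriter) Write(p []byte) (int, error) {
	if w.status == 0 {
		w.WriteHeader(http.StatusOK) // implicit 200 when the handler never calls WriteHeader
	}
	n, err := w.ResponseWriter.Write(p)
	w.written += int64(n)
	return n, err
}

func main() {
	rec := httptest.NewRecorder()
	cw := &countingWriter{ResponseWriter: rec}
	cw.Write([]byte("hello"))
	fmt.Println(cw.status, cw.written) // 200 5
}
```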
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
new file mode 100644
index 000000000..eaf4c3796
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -0,0 +1,290 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/env.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// OTelSemConvStabilityOptIn is the environment variable that controls which HTTP
+// semantic conventions are emitted: "http/dup" duplicates the old and the new
+// conventions, while any other value (including "old") keeps only the old ones.
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN"
+
+type ResponseTelemetry struct {
+ StatusCode int
+ ReadBytes int64
+ ReadError error
+ WriteBytes int64
+ WriteError error
+}
+
+type HTTPServer struct {
+ duplicate bool
+
+ // Old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ serverLatencyMeasure metric.Float64Histogram
+
+ // New metrics
+ requestBodySizeHistogram metric.Int64Histogram
+ responseBodySizeHistogram metric.Int64Histogram
+ requestDurationHistogram metric.Float64Histogram
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ if s.duplicate {
+ return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...)
+ }
+ return OldHTTPServer{}.RequestTraceAttrs(server, req)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ if s.duplicate {
+ return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...)
+ }
+ return OldHTTPServer{}.ResponseTraceAttrs(resp)
+}
+
+// Route returns the attribute for the route.
+func (s HTTPServer) Route(route string) attribute.KeyValue {
+ return OldHTTPServer{}.Route(route)
+}
+
+// Status returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (s HTTPServer) Status(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+type ServerMetricData struct {
+ ServerName string
+ ResponseSize int64
+
+ MetricData
+ MetricAttributes
+}
+
+type MetricAttributes struct {
+ Req *http.Request
+ StatusCode int
+ AdditionalAttributes []attribute.KeyValue
+}
+
+type MetricData struct {
+ RequestSize int64
+ ElapsedTime float64
+}
+
+var (
+ metricAddOptionPool = &sync.Pool{
+ New: func() interface{} {
+ return &[]metric.AddOption{}
+ },
+ }
+
+ metricRecordOptionPool = &sync.Pool{
+ New: func() interface{} {
+ return &[]metric.RecordOption{}
+ },
+ }
+)
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
+ if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
+ attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
+ *addOpts = append(*addOpts, o)
+ s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
+ s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+ *addOpts = (*addOpts)[:0]
+ metricAddOptionPool.Put(addOpts)
+ }
+
+ if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
+ attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+ o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
+ *recordOpts = append(*recordOpts, o)
+ s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
+ s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
+ s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o)
+ *recordOpts = (*recordOpts)[:0]
+ metricRecordOptionPool.Put(recordOpts)
+ }
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+ env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
+ duplicate := env == "http/dup"
+ server := HTTPServer{
+ duplicate: duplicate,
+ }
+ server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
+ if duplicate {
+ server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
+ }
+ return server
+}
+
+type HTTPClient struct {
+ duplicate bool
+
+ // old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
+
+ // new metrics
+ requestBodySize metric.Int64Histogram
+ requestDuration metric.Float64Histogram
+}
+
+func NewHTTPClient(meter metric.Meter) HTTPClient {
+ env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
+ duplicate := env == "http/dup"
+ client := HTTPClient{
+ duplicate: duplicate,
+ }
+ client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
+ if duplicate {
+ client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
+ }
+
+ return client
+}
+
+// RequestTraceAttrs returns attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ if c.duplicate {
+ return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...)
+ }
+ return OldHTTPClient{}.RequestTraceAttrs(req)
+}
+
+// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ if c.duplicate {
+ return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...)
+ }
+
+ return OldHTTPClient{}.ResponseTraceAttrs(resp)
+}
+
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 400 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+ if c.duplicate {
+ return CurrentHTTPClient{}.ErrorType(err)
+ }
+
+ return attribute.KeyValue{}
+}
+
+type MetricOpts struct {
+ measurement metric.MeasurementOption
+ addOptions metric.AddOption
+}
+
+func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
+ return o.measurement
+}
+
+func (o MetricOpts) AddOptions() metric.AddOption {
+ return o.addOptions
+}
+
+func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
+ opts := map[string]MetricOpts{}
+
+ attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
+ set := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ opts["old"] = MetricOpts{
+ measurement: set,
+ addOptions: set,
+ }
+
+ if c.duplicate {
+ attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
+ set := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ opts["new"] = MetricOpts{
+ measurement: set,
+ addOptions: set,
+ }
+ }
+
+ return opts
+}
+
+func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
+ if s.requestBytesCounter == nil || s.latencyMeasure == nil {
+ // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
+ return
+ }
+
+ s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
+ s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
+
+ if s.duplicate {
+ s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
+ s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption())
+ }
+}
+
+func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
+ if s.responseBytesCounter == nil {
+ // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
+ return
+ }
+
+ s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
+}
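Note that the opt-in above is read once, when the instrumentation is constructed. A minimal sketch of a server that emits both metric families, assuming the variable is set before otelhttp.NewHandler runs (handler name and listen address are placeholders):

```go
// Sketch: opting into duplicated (old + new) HTTP semantic conventions.
package main

import (
	"io"
	"net/http"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// "http/dup" makes otelhttp record both the v1.20.0 instruments
	// (http.server.duration, ...) and the newer ones
	// (http.server.request.duration, ...). Any other value keeps only the old set.
	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	})

	// NewHTTPServer (and its duplicate flag) is created inside NewHandler,
	// so the variable must be set before this call.
	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "hello"))
}
```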
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
new file mode 100644
index 000000000..32630864b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+// Generate semconv package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={}" --out=bench_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={}" --out=env.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={}" --out=env_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={}" --out=util.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={}" --out=util_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={}" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
new file mode 100644
index 000000000..8c3c62751
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -0,0 +1,519 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "fmt"
+ "net/http"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type CurrentHTTPServer struct{}
+
+// TraceRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ count := 3 // ServerAddress, Method, Scheme
+
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ count++
+ }
+
+ method, methodOriginal := n.method(req.Method)
+ if methodOriginal != (attribute.KeyValue{}) {
+ count++
+ }
+
+ scheme := n.scheme(req.TLS != nil)
+
+ if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+ // file-path that would be interpreted with a sock family.
+ count++
+ if peerPort > 0 {
+ count++
+ }
+ }
+
+ useragent := req.UserAgent()
+ if useragent != "" {
+ count++
+ }
+
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ count++
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ count++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ count++
+ }
+ if protoVersion != "" {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ attrs = append(attrs,
+ semconvNew.ServerAddress(host),
+ method,
+ scheme,
+ )
+
+ if hostPort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(hostPort))
+ }
+ if methodOriginal != (attribute.KeyValue{}) {
+ attrs = append(attrs, methodOriginal)
+ }
+
+ if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+ }
+ }
+
+ if useragent := req.UserAgent(); useragent != "" {
+ attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+ }
+
+ if req.URL != nil && req.URL.Path != "" {
+ attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ return attrs
+}
+
+func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
+// TraceResponse returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ var count int
+
+ if resp.ReadBytes > 0 {
+ count++
+ }
+ if resp.WriteBytes > 0 {
+ count++
+ }
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ attributes := make([]attribute.KeyValue, 0, count)
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+ )
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+ )
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes,
+ semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+ )
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
+ return semconvNew.HTTPRoute(route)
+}
+
+func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
+ }
+
+ var err error
+ requestBodySizeHistogram, err := meter.Int64Histogram(
+ semconvNew.HTTPServerRequestBodySizeName,
+ metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
+ metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
+ )
+ handleErr(err)
+
+ responseBodySizeHistogram, err := meter.Int64Histogram(
+ semconvNew.HTTPServerResponseBodySizeName,
+ metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
+ metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
+ )
+ handleErr(err)
+ requestDurationHistogram, err := meter.Float64Histogram(
+ semconvNew.HTTPServerRequestDurationName,
+ metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
+ metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
+ )
+ handleErr(err)
+
+ return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
+}
+
+func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ num := len(additionalAttributes) + 3
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ num++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ num++
+ }
+ if protoVersion != "" {
+ num++
+ }
+
+ if statusCode > 0 {
+ num++
+ }
+
+ attributes := slices.Grow(additionalAttributes, num)
+ attributes = append(attributes,
+ semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
+ n.scheme(req.TLS != nil),
+ semconvNew.ServerAddress(host))
+
+ if hostPort > 0 {
+ attributes = append(attributes, semconvNew.ServerPort(hostPort))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
+ }
+ return attributes
+}
+
+type CurrentHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ /*
+ below attributes are returned:
+ - http.request.method
+ - http.request.method.original
+ - url.full
+ - server.address
+ - server.port
+ - network.protocol.name
+ - network.protocol.version
+ */
+	numOfAttributes := 3 // method, URL, and server address.
+
+ var urlHost string
+ if req.URL != nil {
+ urlHost = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if eligiblePort > 0 {
+ numOfAttributes++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ numOfAttributes++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" && protoName != "http" {
+ numOfAttributes++
+ }
+ if protoVersion != "" {
+ numOfAttributes++
+ }
+
+ method, originalMethod := n.method(req.Method)
+ if originalMethod != (attribute.KeyValue{}) {
+ numOfAttributes++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+ attrs = append(attrs, method)
+ if originalMethod != (attribute.KeyValue{}) {
+ attrs = append(attrs, originalMethod)
+ }
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, semconvNew.URLFull(u))
+
+ attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+ if eligiblePort > 0 {
+ attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+ }
+
+ if protoName != "" && protoName != "http" {
+ attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
+func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ /*
+ below attributes are returned:
+ - http.response.status_code
+ - error.type
+ */
+ var count int
+ if resp.StatusCode > 0 {
+ count++
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ count++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, count)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+ }
+
+ if isErrorStatusCode(resp.StatusCode) {
+ errorType := strconv.Itoa(resp.StatusCode)
+ attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+ }
+ return attrs
+}
+
+func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue {
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return semconvNew.ErrorTypeOther
+ }
+
+ return semconvNew.ErrorTypeKey.String(value)
+}
+
+func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+ if method == "" {
+ return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+ }
+ if attr, ok := methodLookup[method]; ok {
+ return attr, attribute.KeyValue{}
+ }
+
+ orig := semconvNew.HTTPRequestMethodOriginal(method)
+ if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+ return attr, orig
+ }
+ return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Histogram{}, noop.Float64Histogram{}
+ }
+
+ var err error
+ requestBodySize, err := meter.Int64Histogram(
+ semconvNew.HTTPClientRequestBodySizeName,
+ metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
+ metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
+ )
+ handleErr(err)
+
+ requestDuration, err := meter.Float64Histogram(
+ semconvNew.HTTPClientRequestDurationName,
+ metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
+ metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
+ )
+ handleErr(err)
+
+ return requestBodySize, requestDuration
+}
+
+func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ num := len(additionalAttributes) + 2
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{h, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if port > 0 {
+ num++
+ }
+
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ num++
+ }
+ if protoVersion != "" {
+ num++
+ }
+
+ if statusCode > 0 {
+ num++
+ }
+
+ attributes := slices.Grow(additionalAttributes, num)
+ attributes = append(attributes,
+ semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
+ semconvNew.ServerAddress(requestHost),
+ n.scheme(req.TLS != nil),
+ )
+
+ if port > 0 {
+ attributes = append(attributes, semconvNew.ServerPort(port))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
+ }
+ return attributes
+}
+
+func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconvNew.URLScheme("https")
+ }
+ return semconvNew.URLScheme("http")
+}
+
+func isErrorStatusCode(code int) bool {
+ return code >= 400 || code < 100
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
new file mode 100644
index 000000000..558efd059
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
@@ -0,0 +1,111 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/util.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// SplitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func SplitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p) // nolint: gosec // Byte size checked 16 above.
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+ if https {
+ if port > 0 && port != 443 {
+ return port
+ }
+ } else {
+ if port > 0 && port != 80 {
+ return port
+ }
+ }
+ return -1
+}
+
+func serverClientIP(xForwardedFor string) string {
+ if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+ xForwardedFor = xForwardedFor[:idx]
+ }
+ return xForwardedFor
+}
+
+func netProtocol(proto string) (name string, version string) {
+ name, version, _ = strings.Cut(proto, "/")
+ name = strings.ToLower(name)
+ return name, version
+}
+
+var methodLookup = map[string]attribute.KeyValue{
+ http.MethodConnect: semconvNew.HTTPRequestMethodConnect,
+ http.MethodDelete: semconvNew.HTTPRequestMethodDelete,
+ http.MethodGet: semconvNew.HTTPRequestMethodGet,
+ http.MethodHead: semconvNew.HTTPRequestMethodHead,
+ http.MethodOptions: semconvNew.HTTPRequestMethodOptions,
+ http.MethodPatch: semconvNew.HTTPRequestMethodPatch,
+ http.MethodPost: semconvNew.HTTPRequestMethodPost,
+ http.MethodPut: semconvNew.HTTPRequestMethodPut,
+ http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func standardizeHTTPMethod(method string) string {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return method
+}
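For reference, a rough standalone mirror of two of the helpers above, shown only to document the expected normalization (these copies are illustrative; the vendored ones live in an internal package):

```go
// Illustrative copies of netProtocol and standardizeHTTPMethod behavior.
package main

import (
	"fmt"
	"strings"
)

// netProtocol mirrors the helper above: "HTTP/1.1" -> ("http", "1.1").
func netProtocol(proto string) (string, string) {
	name, version, _ := strings.Cut(proto, "/")
	return strings.ToLower(name), version
}

// standardizeHTTPMethod mirrors the helper above: unknown methods collapse to "_OTHER".
func standardizeHTTPMethod(method string) string {
	switch m := strings.ToUpper(method); m {
	case "GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "CONNECT", "OPTIONS", "TRACE":
		return m
	default:
		return "_OTHER"
	}
}

func main() {
	fmt.Println(netProtocol("HTTP/1.1"))           // http 1.1
	fmt.Println(standardizeHTTPMethod("get"))      // GET
	fmt.Println(standardizeHTTPMethod("PROPFIND")) // _OTHER
}
```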
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
new file mode 100644
index 000000000..57d1507b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
@@ -0,0 +1,266 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconv/v1.20.0.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "slices"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+type OldHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+ return semconvutil.HTTPServerRequest(server, req)
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+ attributes := []attribute.KeyValue{}
+
+ if resp.ReadBytes > 0 {
+ attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
+ }
+ if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
+ }
+ if resp.WriteBytes > 0 {
+ attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
+ }
+ if resp.StatusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
+ }
+ if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
+ // This is not in the semantic conventions, but is historically provided
+ attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
+ }
+
+ return attributes
+}
+
+// Route returns the attribute for the route.
+func (o OldHTTPServer) Route(route string) attribute.KeyValue {
+ return semconv.HTTPRoute(route)
+}
+
+// HTTPStatusCode returns the attribute for the HTTP status code.
+// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
+func HTTPStatusCode(status int) attribute.KeyValue {
+ return semconv.HTTPStatusCode(status)
+}
+
+// Server HTTP metrics.
+const (
+	serverRequestSize  = "http.server.request.size"  // Incoming request bytes total
+	serverResponseSize = "http.server.response.size" // Outgoing response bytes total
+	serverDuration     = "http.server.duration"      // Incoming end to end duration, milliseconds
+)
+
+func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ var err error
+ requestBytesCounter, err := meter.Int64Counter(
+ serverRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ serverResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ serverLatencyMeasure, err := meter.Float64Histogram(
+ serverDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of inbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
+}
+
+func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ n := len(additionalAttributes) + 3
+ var host string
+ var p int
+ if server == "" {
+ host, p = SplitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = SplitHostPort(server)
+ if p < 0 {
+ _, p = SplitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ protoName, protoVersion := netProtocol(req.Proto)
+ if protoName != "" {
+ n++
+ }
+ if protoVersion != "" {
+ n++
+ }
+
+ if statusCode > 0 {
+ n++
+ }
+
+ attributes := slices.Grow(additionalAttributes, n)
+ attributes = append(attributes,
+ semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
+ o.scheme(req.TLS != nil),
+ semconv.NetHostName(host))
+
+ if hostPort > 0 {
+ attributes = append(attributes, semconv.NetHostPort(hostPort))
+ }
+ if protoName != "" {
+ attributes = append(attributes, semconv.NetProtocolName(protoName))
+ }
+ if protoVersion != "" {
+ attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ return attributes
+}
+
+func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+ if https {
+ return semconv.HTTPSchemeHTTPS
+ }
+ return semconv.HTTPSchemeHTTP
+}
+
+type OldHTTPClient struct{}
+
+func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+ return semconvutil.HTTPClientRequest(req)
+}
+
+func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+ return semconvutil.HTTPClientResponse(resp)
+}
+
+func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
+ /* The following semantic conventions are returned if present:
+ http.method string
+ http.status_code int
+ net.peer.name string
+ net.peer.port int
+ */
+
+ n := 2 // method, peer name.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ var requestHost string
+ var requestPort int
+ for _, hostport := range []string{h, req.Header.Get("Host")} {
+ requestHost, requestPort = SplitHostPort(hostport)
+ if requestHost != "" || requestPort > 0 {
+ break
+ }
+ }
+
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+ if port > 0 {
+ n++
+ }
+
+ if statusCode > 0 {
+ n++
+ }
+
+ attributes := slices.Grow(additionalAttributes, n)
+ attributes = append(attributes,
+ semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
+ semconv.NetPeerName(requestHost),
+ )
+
+ if port > 0 {
+ attributes = append(attributes, semconv.NetPeerPort(port))
+ }
+
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ return attributes
+}
+
+// Client HTTP metrics.
+const (
+	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
+	clientResponseSize = "http.client.response.size" // Incoming response bytes total
+	clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
+)
+
+func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ requestBytesCounter, err := meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ latencyMeasure, err := meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, latencyMeasure
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
index edf4ce3d3..7aa5f99e8 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
index 0efd5261f..a73bb06e9 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -2,18 +2,7 @@
// source: internal/shared/semconvutil/httpconv.go.tmpl
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
index d3a06e0ca..b80a1db61 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -2,17 +2,7 @@
// source: internal/shared/semconvutil/netconv.go.tmpl
// Copyright The OpenTelemetry Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
@@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, n)
attrs = append(attrs, c.HostName(h))
if p > 0 {
- attrs = append(attrs, c.HostPort(int(p)))
+ attrs = append(attrs, c.HostPort(p))
}
return attrs
}
@@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, n)
attrs = append(attrs, c.PeerName(h))
if p > 0 {
- attrs = append(attrs, c.PeerPort(int(p)))
+ attrs = append(attrs, c.PeerPort(p))
}
return attrs
}
@@ -205,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
- return host, int(p)
+ return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
}
func netProtocol(proto string) (name string, version string) {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
index 26a51a180..ea504e396 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
@@ -48,8 +37,12 @@ type labelerContextKeyType int
const lablelerContextKey labelerContextKeyType = 0
-func injectLabeler(ctx context.Context, l *Labeler) context.Context {
- return context.WithValue(ctx, lablelerContextKey, l)
+// ContextWithLabeler returns a new context with the provided Labeler instance.
+// Attributes added to the specified labeler will be injected into metrics
+// emitted by the instrumentation. Only one labeler can be injected into the
+// context. Injecting it multiple times will override the previous calls.
+func ContextWithLabeler(parent context.Context, l *Labeler) context.Context {
+ return context.WithValue(parent, lablelerContextKey, l)
}
// LabelerFromContext retrieves a Labeler instance from the provided context if
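ContextWithLabeler replaces the previously unexported injectLabeler, so callers can now pre-seed a Labeler or reuse one injected by the middleware. A short sketch of the typical server-side use, where attributes added inside the handler land on that request's metrics (the "tenant" attribute and header are arbitrary examples):

```go
// Sketch: attaching extra metric attributes via the request-scoped Labeler.
package main

import (
	"io"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The otelhttp middleware injects a Labeler into the request context;
		// attributes added here end up on the HTTP metrics for this request.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
		io.WriteString(w, "ok")
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(inner, "tenant-api"))
}
```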
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
new file mode 100644
index 000000000..9476ef01b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "time"
+)
+
+type startTimeContextKeyType int
+
+const startTimeContextKey startTimeContextKeyType = 0
+
+// ContextWithStartTime returns a new context with the provided start time. The
+// start time will be used for metrics and traces emitted by the
+// instrumentation. Only one start time can be injected into the context.
+// Injecting it multiple times will override the previous calls.
+func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
+ return context.WithValue(parent, startTimeContextKey, start)
+}
+
+// StartTimeFromContext retrieves a time.Time from the provided context if one
+// is available. If no start time was found in the provided context, a zero
+// start time is returned.
+func StartTimeFromContext(ctx context.Context) time.Time {
+ t, _ := ctx.Value(startTimeContextKey).(time.Time)
+ return t
+}
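A brief sketch of how this hook can be used on the server side, assuming the handler half of the instrumentation (not shown in this diff) reads the value back via StartTimeFromContext; the preprocessing delay below is only a stand-in:

```go
// Sketch: letting otelhttp measure from an earlier point than its own handler entry.
package main

import (
	"io"
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	instrumented := otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	}), "work")

	// Outer middleware: capture the real arrival time, do some preprocessing,
	// then pass the start time along so the measured duration includes it.
	outer := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		time.Sleep(20 * time.Millisecond) // stand-in for auth, decompression, etc.
		ctx := otelhttp.ContextWithStartTime(r.Context(), start)
		instrumented.ServeHTTP(w, r.WithContext(ctx))
	})

	_ = http.ListenAndServe(":8080", outer)
}
```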
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
index 43e937a67..44b86ad86 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
@@ -22,15 +11,14 @@ import (
"sync/atomic"
"time"
- "go.opentelemetry.io/otel/metric"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+ "go.opentelemetry.io/otel/trace"
)
// Transport implements the http.RoundTripper interface and wraps
@@ -38,17 +26,15 @@ import (
type Transport struct {
rt http.RoundTripper
- tracer trace.Tracer
- meter metric.Meter
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- clientTrace func(context.Context) *httptrace.ClientTrace
-
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- latencyMeasure metric.Float64Histogram
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+ metricAttributesFn func(*http.Request) []attribute.KeyValue
+
+ semconv semconv.HTTPClient
}
var _ http.RoundTripper = &Transport{}
@@ -75,43 +61,19 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
c := newConfig(append(defaultOpts, opts...)...)
t.applyConfig(c)
- t.createMeasures()
return &t
}
func (t *Transport) applyConfig(c *config) {
t.tracer = c.Tracer
- t.meter = c.Meter
t.propagators = c.Propagators
t.spanStartOptions = c.SpanStartOptions
t.filters = c.Filters
t.spanNameFormatter = c.SpanNameFormatter
t.clientTrace = c.ClientTrace
-}
-
-func (t *Transport) createMeasures() {
- var err error
- t.requestBytesCounter, err = t.meter.Int64Counter(
- clientRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- t.responseBytesCounter, err = t.meter.Int64Counter(
- clientResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- t.latencyMeasure, err = t.meter.Float64Histogram(
- clientDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of outbound HTTP requests."),
- )
- handleErr(err)
+ t.semconv = semconv.NewHTTPClient(c.Meter)
+ t.metricAttributesFn = c.MetricAttributesFn
}
func defaultTransportFormatter(_ string, r *http.Request) string {
@@ -148,58 +110,75 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
}
- labeler := &Labeler{}
- ctx = injectLabeler(ctx, labeler)
+ labeler, found := LabelerFromContext(ctx)
+ if !found {
+ ctx = ContextWithLabeler(ctx, labeler)
+ }
r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
- // use a body wrapper to determine the request size
- var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ bw := request.NewBodyWrapper(r.Body, func(int64) {})
if r.Body != nil && r.Body != http.NoBody {
- bw.ReadCloser = r.Body
- // noop to prevent nil panic. not using this record fun yet.
- bw.record = func(int64) {}
- r.Body = &bw
+ r.Body = bw
}
- span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
res, err := t.rt.RoundTrip(r)
if err != nil {
- span.RecordError(err)
+ // set error type attribute if the error is part of the predefined
+ // error types.
+ // otherwise, record it as an exception
+ if errType := t.semconv.ErrorType(err); errType.Valid() {
+ span.SetAttributes(errType)
+ } else {
+ span.RecordError(err)
+ }
+
span.SetStatus(codes.Error, err.Error())
span.End()
return res, err
}
// metrics
- metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
- if res.StatusCode > 0 {
- metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
- }
- o := metric.WithAttributes(metricAttrs...)
- t.requestBytesCounter.Add(ctx, bw.read.Load(), o)
+ metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
+ Req: r,
+ StatusCode: res.StatusCode,
+ AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
+ })
+
// For handling response bytes we leverage a callback when the client reads the http response
readRecordFunc := func(n int64) {
- t.responseBytesCounter.Add(ctx, n, o)
+ t.semconv.RecordResponseSize(ctx, n, metricOpts)
}
// traces
- span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
- span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+ span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
+ span.SetStatus(t.semconv.Status(res.StatusCode))
res.Body = newWrappedBody(span, readRecordFunc, res.Body)
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
- t.latencyMeasure.Record(ctx, elapsedTime, o)
+ t.semconv.RecordMetrics(ctx, semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ ElapsedTime: elapsedTime,
+ }, metricOpts)
+
+ return res, nil
+}
- return res, err
+func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
+ var attributeForRequest []attribute.KeyValue
+ if t.metricAttributesFn != nil {
+ attributeForRequest = t.metricAttributesFn(r)
+ }
+ return attributeForRequest
}
// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
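The public construction path of the reworked Transport is unchanged; a minimal client sketch (the target URL is a placeholder):

```go
// Sketch: instrumenting an outgoing HTTP client with the reworked Transport.
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	client := http.Client{
		// Spans and the old request/response size counters are recorded per
		// round trip; with OTEL_SEMCONV_STABILITY_OPT_IN=http/dup the new
		// histograms are recorded as well.
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}

	resp, err := client.Get("http://localhost:8080/")
	if err == nil {
		resp.Body.Close()
	}
}
```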
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index 35254e888..386f09e1b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -1,22 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.49.0"
+ return "0.59.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
deleted file mode 100644
index 2852ec971..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-var _ io.ReadCloser = &bodyWrapper{}
-
-// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type bodyWrapper struct {
- io.ReadCloser
- record func(n int64) // must not be nil
-
- read atomic.Int64
- err error
-}
-
-func (w *bodyWrapper) Read(b []byte) (int, error) {
- n, err := w.ReadCloser.Read(b)
- n1 := int64(n)
- w.read.Add(n1)
- w.err = err
- w.record(n1)
- return n, err
-}
-
-func (w *bodyWrapper) Close() error {
- return w.ReadCloser.Close()
-}
-
-var _ http.ResponseWriter = &respWriterWrapper{}
-
-// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the first written statusCode.
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
-// that may be useful when using it in real life situations.
-type respWriterWrapper struct {
- http.ResponseWriter
- record func(n int64) // must not be nil
-
- // used to inject the header
- ctx context.Context
-
- props propagation.TextMapPropagator
-
- written int64
- statusCode int
- err error
- wroteHeader bool
-}
-
-func (w *respWriterWrapper) Header() http.Header {
- return w.ResponseWriter.Header()
-}
-
-func (w *respWriterWrapper) Write(p []byte) (int, error) {
- if !w.wroteHeader {
- w.WriteHeader(http.StatusOK)
- }
- n, err := w.ResponseWriter.Write(p)
- n1 := int64(n)
- w.record(n1)
- w.written += n1
- w.err = err
- return n, err
-}
-
-// WriteHeader persists initial statusCode for span attribution.
-// All calls to WriteHeader will be propagated to the underlying ResponseWriter
-// and will persist the statusCode from the first call.
-// Blocking consecutive calls to WriteHeader alters expected behavior and will
-// remove warning logs from net/http where developers will notice incorrect handler implementations.
-func (w *respWriterWrapper) WriteHeader(statusCode int) {
- if !w.wroteHeader {
- w.wroteHeader = true
- w.statusCode = statusCode
- }
- w.ResponseWriter.WriteHeader(statusCode)
-}
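The deleted `wrap.go` held unexported helpers that counted request/response bytes and captured the first written status code; that bookkeeping appears to have moved into internal packages in this release, so no public API is lost. For readers who relied on reading this vendored code, here is a minimal sketch of the same status-capturing `http.ResponseWriter` pattern (not taken from otelhttp):

```go
package example

import "net/http"

// statusRecorder remembers the first status code written and the number of
// bytes sent, while delegating everything else to the wrapped writer.
type statusRecorder struct {
	http.ResponseWriter
	status      int
	written     int64
	wroteHeader bool
}

func (w *statusRecorder) WriteHeader(code int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.status = code
	}
	w.ResponseWriter.WriteHeader(code)
}

func (w *statusRecorder) Write(p []byte) (int, error) {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK)
	}
	n, err := w.ResponseWriter.Write(p)
	w.written += int64(n)
	return n, err
}
```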
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 120b63a9c..6bf3abc41 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -5,3 +5,5 @@ collison
consequentially
ans
nam
+valu
+thirdparty
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
index 4afbb1fb3..e2cb3ea94 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellrc
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -5,6 +5,6 @@ check-filenames =
check-hidden =
ignore-words = .codespellignore
interactive = 1
-skip = .git,go.mod,go.sum,semconv,venv,.tools
+skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools
uri-ignore-words-list = *
write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
index 895c7664b..ae8577ef3 100644
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -12,11 +12,3 @@ go.work
go.work.sum
gen/
-
-/example/dice/dice
-/example/namedtracer/namedtracer
-/example/otel-collector/otel-collector
-/example/opencensus/opencensus
-/example/passthrough/passthrough
-/example/prometheus/prometheus
-/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
deleted file mode 100644
index 38a1f5698..000000000
--- a/vendor/go.opentelemetry.io/otel/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "opentelemetry-proto"]
- path = exporters/otlp/internal/opentelemetry-proto
- url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index a62511f38..ce3f40b60 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -9,8 +9,11 @@ linters:
disable-all: true
# Specifically enable linters we want to use.
enable:
+ - asasalint
+ - bodyclose
- depguard
- errcheck
+ - errorlint
- godot
- gofumpt
- goimports
@@ -19,10 +22,16 @@ linters:
- govet
- ineffassign
- misspell
+ - perfsprint
- revive
- staticcheck
+ - tenv
+ - testifylint
- typecheck
+ - unconvert
- unused
+ - unparam
+ - usestdlibvars
issues:
# Maximum issues count per one linter.
@@ -54,16 +63,17 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
- # It's okay to not run gosec in a test.
+ # It's okay to not run gosec and perfsprint in a test.
- path: _test\.go
linters:
- gosec
- # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+ - perfsprint
+ # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
- # Igonoring gosec G402: TLS MinVersion too low
+ # Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
@@ -88,6 +98,13 @@ linters-settings:
- pkg: "crypto/md5"
- pkg: "crypto/sha1"
- pkg: "crypto/**/pkix"
+ auto/sdk:
+ files:
+ - "!internal/global/trace.go"
+ - "~internal/global/trace_test.go"
+ deny:
+ - pkg: "go.opentelemetry.io/auto/sdk"
+ desc: Do not use SDK from automatic instrumentation.
otlp-internal:
files:
- "!**/exporters/otlp/internal/**/*.go"
@@ -120,10 +137,10 @@ linters-settings:
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- - "**/example/*.go"
- - "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
+ - "**/log/*.go"
+ - "**/log/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/internal$"
desc: Do not use cross-module internal packages.
@@ -147,6 +164,12 @@ linters-settings:
locale: US
ignore-words:
- cancelled
+ perfsprint:
+ err-error: true
+ errorf: true
+ int-conversion: true
+ sprintf1: true
+ strconcat: true
revive:
# Sets the default failure confidence.
# This means that linting errors with less than 0.8 confidence will be ignored.
@@ -294,3 +317,9 @@ linters-settings:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: waitgroup-by-value
disabled: false
+ testifylint:
+ enable-all: true
+ disable:
+ - float-compare
+ - go-require
+ - require-error
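The lint config above newly enables `perfsprint` and `usestdlibvars`, among others. Purely as an illustration of the kind of rewrites these linters push for (not code from this repository):

```go
package example

import (
	"net/http"
	"strconv"
)

func statusLabel(code int) string {
	// perfsprint: prefer strconv.Itoa over fmt.Sprintf("%d", code).
	return strconv.Itoa(code)
}

func isOK(code int) bool {
	// usestdlibvars: prefer the named constant over the literal 200.
	return code == http.StatusOK
}
```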
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 98f2d2043..599d59cd1 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,353 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+
+
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
+## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
+
+### Added
+
+- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
+- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
+ This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
+ Users can use it to avoid performing computationally expensive operations when recording measurements.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016)
+
+### Changed
+
+- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
+ See that package for more information. (#5920)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
+- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
+- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
+
+### Fixed
+
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954)
+- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
+
+## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
+- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
+- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
+- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
+- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
+ The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
+- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
+- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
+
+### Changed
+
+- Support scope attributes and treat them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
+- Support scope attributes and treat them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
+- Support scope attributes and treat them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
+- Treat schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
+- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
+
+### Fixed
+
+- Global MeterProvider registration unwraps global instrument Observers; the undocumented Unwrap() methods are now private. (#5881)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
+- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
+
+### Removed
+
+- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
+
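A short sketch of the exemplar options listed in the 1.32.0 section above, assuming the v1.32.0+ metric SDK; this is the programmatic equivalent of setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`:

```go
package example

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// newMeterProvider builds a MeterProvider with exemplar recording disabled.
func newMeterProvider() *sdkmetric.MeterProvider {
	return sdkmetric.NewMeterProvider(
		// Equivalent to OTEL_METRICS_EXEMPLAR_FILTER=always_off.
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
	)
}
```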
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
+- Add `WithExportBufferSize` option to log batch processor. (#5877)
+
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
+- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instruments creation when setting meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+ This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+ This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+ It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
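A sketch of the `TraceState.Walk` iterator added in 1.29.0, assuming the callback form `func(key, value string) bool` implied by the entry above (return `false` to stop early):

```go
package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// dumpTraceState prints every key-value pair carried in the trace state.
func dumpTraceState(ts trace.TraceState) {
	ts.Walk(func(key, value string) bool {
		fmt.Println(key, "=", value)
		return true // keep iterating
	})
}
```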
+## [1.28.0/0.50.0/0.4.0] 2024-07-02
+
+### Added
+
+- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
+ This method is used to check if an `Instrument` instance is a zero-value. (#5431)
+- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
+- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
+ The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+ - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes zero-allocation log processing possible with `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+ - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
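A sketch of the synchronous gauge added to the metric API in 1.27.0; the instrument name below is purely illustrative:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
)

// recordQueueDepth records a point-in-time value with a synchronous gauge,
// no observable callback required.
func recordQueueDepth(ctx context.Context, depth int64) error {
	meter := otel.Meter("example")
	gauge, err := meter.Int64Gauge("queue.depth") // hypothetical instrument name
	if err != nil {
		return err
	}
	gauge.Record(ctx, depth)
	return nil
}
```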
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+ This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+ This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+ This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+ The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+ This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+ This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+ This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+ At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+ Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+ Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated.
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
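A sketch of `Span.AddLink` added in 1.25.0, which attaches a link to a span after it has started:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// linkTo starts a span and links it to a remote span context after the fact.
func linkTo(ctx context.Context, remote trace.SpanContext) {
	_, span := otel.Tracer("example").Start(ctx, "operation")
	defer span.End()

	span.AddLink(trace.Link{SpanContext: remote})
}
```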
## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
This release is the last to support [Go 1.20].
@@ -22,6 +369,7 @@ The next release will require at least [Go 1.21].
This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
This module is in an alpha state, it is subject to breaking changes.
See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
### Fixed
@@ -138,7 +486,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab
## [1.20.0/0.43.0] 2023-11-10
-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
### Added
@@ -170,15 +518,15 @@ This release brings a breaking change for custom trace API implementations. Some
- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
This extends the `Tracer` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
This extends the `Span` interface and is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
@@ -814,7 +1162,7 @@ The next release will require at least [Go 1.19].
- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
- Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
- Prevent duplicate Prometheus description, unit, and type. (#3469)
@@ -1682,7 +2030,7 @@ with major version 0.
- Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span.
Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
- Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
+ Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
@@ -1859,7 +2207,7 @@ with major version 0.
- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
- Unify endpoint API that related to OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430)
- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
@@ -2256,7 +2604,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-colector example to use the v0.5.0 collector. (#915)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@@ -2849,7 +3197,17 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
+[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
+[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
@@ -2928,6 +3286,9 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+
+[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
@@ -2937,3 +3298,5 @@ It contains api and sdk for trace and meter.
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
+
+[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 31d336d92..945a07d2b 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -5,13 +5,13 @@
#####################################################
#
# Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/main/community-membership.md
+# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu
-CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole
\ No newline at end of file
+CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index c9f2bac55..22a2e9dbd 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way:
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package.
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title, and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
+
## Style Guide
One of the primary goals of this project is that it is actually used by
@@ -560,12 +570,18 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
### Testing
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
-absence of race conditions.
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
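A minimal example of the naming convention described above; the test body is illustrative only, what matters is the `ConcurrentSafe` term in the top-level test name so the `test-concurrent-safe` job reruns it under `-race`:

```go
package example

import (
	"sync"
	"testing"
)

func TestCounterConcurrentSafe(t *testing.T) {
	var (
		mu sync.Mutex
		n  int
		wg sync.WaitGroup
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			n++
			mu.Unlock()
		}()
	}
	wg.Wait()
	if n != 10 {
		t.Fatalf("got %d, want 10", n)
	}
}
```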
### Internal packages
@@ -613,31 +629,34 @@ should be canceled.
## Approvers and Maintainers
-### Approvers
+### Triagers
-- [Evan Torrie](https://github.com/evantorrie), Verizon Media
-- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+### Approvers
### Maintainers
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [David Ashpole](https://github.com/dashpole), Google
-- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk
+- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
-- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
-- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
-- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Aaron Clawson](https://github.com/MadVikingGod)
+- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Chester Cheung](https://github.com/hanyuancheung)
+- [Evan Torrie](https://github.com/evantorrie)
+- [Gustavo Silva Paiva](https://github.com/paivagustavo)
+- [Josh MacDonald](https://github.com/jmacd)
+- [Liz Fong-Jones](https://github.com/lizthegrey)
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
[Approver]: #approvers
[Maintainer]: #maintainers
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 6de95219b..a7f6d8cc6 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -1,16 +1,5 @@
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
TOOLS_MOD_DIR := ./internal/tools
@@ -25,8 +14,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
-precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
-ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
+precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
+ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
# Tools
@@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools
$(TOOLS):
@mkdir -p $@
-$(TOOLS)/%: | $(TOOLS)
+$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
$(GO) build -o $@ $(PACKAGE)
@@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-DBOTCONF = $(TOOLS)/dbotconf
-$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
-
GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
@@ -68,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
-GOJQ = $(TOOLS)/gojq
-$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
-
GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
@@ -81,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -110,7 +93,7 @@ $(PYTOOLS):
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
# Install python packages into the virtual environment.
-$(PYTOOLS)/%: | $(PYTOOLS)
+$(PYTOOLS)/%: $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
@@ -124,18 +107,18 @@ generate: go-generate vanity-import-fix
.PHONY: go-generate
go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
go-generate/%: DIR=$*
-go-generate/%: | $(STRINGER) $(GOTMPL)
+go-generate/%: $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
.PHONY: vanity-import-fix
-vanity-import-fix: | $(PORTO)
+vanity-import-fix: $(PORTO)
@$(PORTO) --include-internal -w .
# Generate go.work file for local development.
.PHONY: go-work
-go-work: | $(CROSSLINK)
+go-work: $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd)
# Build
@@ -159,12 +142,14 @@ build-tests/%:
# Tests
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v -race
+test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
+test-concurrent-safe: TIMEOUT=120
$(TEST_TARGETS): test
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
@@ -178,7 +163,7 @@ test/%:
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage
-test-coverage: | $(GOCOVMERGE)
+test-coverage: $(GOCOVMERGE)
@set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
@@ -192,40 +177,37 @@ test-coverage: | $(GOCOVMERGE)
done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-# Adding a directory will include all benchmarks in that directory if a filter is not specified.
-BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark
-benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
-BENCHMARK_FILTER = .
-# You can override the filter for a particular directory by adding a rule here.
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
benchmark/%:
- @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+ @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
&& cd $* \
- $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
.PHONY: golangci-lint golangci-lint-fix
golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$*
-golangci-lint/%: | $(GOLANGCI_LINT)
+golangci-lint/%: $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink
-crosslink: | $(CROSSLINK)
+crosslink: $(CROSSLINK)
@echo "Updating intra-repository dependencies in all go modules" \
&& $(CROSSLINK) --root=$(shell pwd) --prune
.PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$*
-go-mod-tidy/%: | crosslink
+go-mod-tidy/%: crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
- && $(GO) mod tidy -compat=1.20
+ && $(GO) mod tidy -compat=1.21
.PHONY: lint-modules
lint-modules: go-mod-tidy
@@ -234,25 +216,35 @@ lint-modules: go-mod-tidy
lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check
-vanity-import-check: | $(PORTO)
+vanity-import-check: $(PORTO)
@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell
-misspell: | $(MISSPELL)
+misspell: $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$*
-govulncheck/%: | $(GOVULNCHECK)
+govulncheck/%: $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \
&& $(GOVULNCHECK) ./...
.PHONY: codespell
-codespell: | $(CODESPELL)
+codespell: $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
+.PHONY: toolchain-check
+toolchain-check:
+ @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
+ awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \
+ done); \
+ if [ -n "$${toolchainRes}" ]; then \
+ echo "toolchain checking failed:"; echo "$${toolchainRes}"; \
+ exit 1; \
+ fi
+
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
@@ -263,15 +255,6 @@ license-check:
exit 1; \
fi
-DEPENDABOT_CONFIG = .github/dependabot.yml
-.PHONY: dependabot-check
-dependabot-check: | $(DBOTCONF)
- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
-
-.PHONY: dependabot-generate
-dependabot-generate: | $(DBOTCONF)
- @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
-
.PHONY: check-clean-work-tree
check-clean-work-tree:
@if ! git diff --quiet; then \
@@ -284,13 +267,11 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
-semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
+semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
@@ -302,17 +283,25 @@ gorelease/%:| $(GORELEASE)
&& $(GORELEASE) \
|| echo ""
+.PHONY: verify-mods
+verify-mods: $(MULTIMOD)
+ $(MULTIMOD) verify
+
.PHONY: prerelease
-prerelease: | $(MULTIMOD)
+prerelease: verify-mods
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+ $(MULTIMOD) prerelease -m ${MODSET}
COMMIT ?= "HEAD"
.PHONY: add-tags
-add-tags: | $(MULTIMOD)
+add-tags: verify-mods
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+ $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown
-lint-markdown:
+lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
+
+.PHONY: verify-readmes
+verify-readmes:
+ ./verify_readmes.sh
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 7766259a5..d9a192076 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -1,6 +1,6 @@
# OpenTelemetry-Go
-[](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[](https://pkg.go.dev/go.opentelemetry.io/otel)
[](https://goreportcard.com/report/go.opentelemetry.io/otel)
@@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s
|---------|--------------------|
| Traces | Stable |
| Metrics | Stable |
-| Logs | In development[^1] |
+| Logs | Beta[^1] |
Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@@ -47,23 +47,22 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments.
-| OS | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu | 1.22 | amd64 |
-| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.20 | amd64 |
-| Ubuntu | 1.22 | 386 |
-| Ubuntu | 1.21 | 386 |
-| Ubuntu | 1.20 | 386 |
-| MacOS | 1.22 | amd64 |
-| MacOS | 1.21 | amd64 |
-| MacOS | 1.20 | amd64 |
-| Windows | 1.22 | amd64 |
-| Windows | 1.21 | amd64 |
-| Windows | 1.20 | amd64 |
-| Windows | 1.22 | 386 |
-| Windows | 1.21 | 386 |
-| Windows | 1.20 | 386 |
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.22 | 386 |
+| Linux | 1.23 | arm64 |
+| Linux | 1.22 | arm64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS 13 | 1.22 | amd64 |
+| macOS | 1.23 | arm64 |
+| macOS | 1.22 | arm64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.22 | amd64 |
+| Windows | 1.23 | 386 |
+| Windows | 1.22 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
@@ -90,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need
to use the
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
-package. The included [examples](./example/) are a good way to see some
-practical uses of this process.
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
+are a good way to see some practical uses of this process.
### Export
@@ -100,12 +99,12 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-| Exporter | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
## Contributing
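
For orientation alongside the README changes above, here is a minimal, hedged sketch of direct instrumentation with the public API (not part of the vendored diff); the tracer name "example/readme" and the span and attribute names are illustrative only:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	ctx := context.Background()

	// Acquire a Tracer from the global provider. Without an SDK configured
	// this is a no-op implementation, so the calls below are safe either way.
	tracer := otel.Tracer("example/readme")

	ctx, span := tracer.Start(ctx, "do-work")
	defer span.End()
	span.SetAttributes(attribute.String("example.kind", "readme"))

	_ = ctx // pass ctx to downstream calls so any child spans nest correctly
}
```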
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index d2691d0bd..4ebef4f9d 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
## Pre-Release
First, decide which module sets will be released and update their versions
@@ -63,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
- Move all the `Unreleased` changes into a new section following the title scheme (`[] - `).
+ - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
- Update all the appropriate links at the bottom.
4. Push the changes to upstream and create a Pull Request on GitHub.
@@ -104,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `` on GitHub.
The release body should include all the release notes from the Changelog for this release.
-## Verify Examples
-
-After releasing verify that examples build outside of the repository.
-
-```
-./verify_examples.sh
-```
-
-The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
-This ensures they build with the published release, not the local copy.
-
## Post-Release
### Contrib Repository
@@ -134,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just
Bump the dependencies in the following Go services:
-- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
index 412f1e362..b8cb605c1 100644
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -26,7 +26,7 @@ is designed so the following goals can be achieved.
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
(e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
paths used in `go get` commands (e.g., `go get
- go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
+ go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a
`@v2.0.1` in that example. One way to think about it is that the module
name now includes the `/v2`, so include `/v2` whenever you are using the
module name).
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 000000000..5b3da8f14
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
index dafe7424d..eef51ebc2 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/doc.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index fe2bc5766..318e42fca 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
index 638c213d5..be9cd922d 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
index 841b271fb..f2ba89ce4 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
index 0656a04e4..d9a22c650 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
index 1ddf3ce05..3028f9a40 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index fb6da5145..6cbefcead 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -1,24 +1,14 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
+ "cmp"
"encoding/json"
"reflect"
+ "slices"
"sort"
- "sync"
)
type (
@@ -26,23 +16,33 @@ type (
// immutable set of attributes, with an internal cache for storing
// attribute encodings.
//
- // This type supports the Equivalent method of comparison using values of
- // type Distinct.
+ // This type will remain comparable for backwards compatibility. The
+ // equivalence of Sets across versions is not guaranteed to be stable.
+ // Prior versions may find two Sets to be equal or not when compared
+ // directly (i.e. ==), but subsequent versions may not. Users should use
+ // the Equals method to ensure stable equivalence checking.
+ //
+ // Users should also use the Distinct returned from Equivalent as a map key
+ // instead of a Set directly. In addition to that type providing guarantees
+ // on stable equivalence, it may also provide performance improvements.
Set struct {
equivalent Distinct
}
- // Distinct wraps a variable-size array of KeyValue, constructed with keys
- // in sorted order. This can be used as a map key or for equality checking
- // between Sets.
+ // Distinct is a unique identifier of a Set.
+ //
+ // Distinct is designed to ensure equivalence stability: comparisons
+ // will return the same value across versions. For this reason, Distinct
+ // should always be used as a map key instead of a Set.
Distinct struct {
iface interface{}
}
- // Sortable implements sort.Interface, used for sorting KeyValue. This is
- // an exported type to support a memory optimization. A pointer to one of
- // these is needed for the call to sort.Stable(), which the caller may
- // provide in order to avoid an allocation. See NewSetWithSortable().
+ // Sortable implements sort.Interface, used for sorting KeyValue.
+ //
+ // Deprecated: This type is no longer used. It was added as a performance
+ // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+ // longer supported by the module).
Sortable []KeyValue
)
@@ -56,12 +56,6 @@ var (
iface: [0]KeyValue{},
},
}
-
- // sortables is a pool of Sortables used to create Sets with a user does
- // not provide one.
- sortables = sync.Pool{
- New: func() interface{} { return new(Sortable) },
- }
)
// EmptySet returns a reference to a Set with no elements.
@@ -187,13 +181,7 @@ func empty() Set {
// Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- srt := sortables.Get().(*Sortable)
- s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
- sortables.Put(srt)
+ s, _ := NewSetWithFiltered(kvs, nil)
return s
}
@@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set {
// NewSetWithSortableFiltered for more details.
//
// This call includes a Sortable option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
return s
}
@@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if len(kvs) == 0 {
return empty(), nil
}
- srt := sortables.Get().(*Sortable)
- s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
- sortables.Put(srt)
- return s, filtered
-}
-
-// NewSetWithSortableFiltered returns a new Set.
-//
-// Duplicate keys are eliminated by taking the last value. This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-// - Last-value-wins semantics
-// - Caller sees the reordering, but doesn't lose values
-// - Repeated call preserve last-value wins.
-//
-// Note that methods are defined on Set, although this returns Set. Callers
-// can avoid memory allocations by:
-//
-// - allocating a Sortable for use as a temporary in this method
-// - allocating a Set for storing the return value of this constructor.
-//
-// The result maintains a cache of encoded attributes, by attribute.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second []KeyValue return value is a list of attributes that were
-// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
-
- *tmp = kvs
// Stable sort so the following de-duplication can implement
// last-value-wins semantics.
- sort.Stable(tmp)
-
- *tmp = nil
+ slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
position := len(kvs) - 1
offset := position - 1
@@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S
return Set{equivalent: computeDistinct(kvs)}, nil
}
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated call preserve last-value wins.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+//
+// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
+ return NewSetWithFiltered(kvs, filter)
+}
+
// filteredToFront filters slice in-place using keep function. All KeyValues that need to
// be removed are moved to the front. All KeyValues that need to be kept are
// moved (in-order) to the back. The index for the first KeyValue to be kept is
@@ -368,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) {
case 1:
- ptr := new([1]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [1]KeyValue(kvs)
case 2:
- ptr := new([2]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [2]KeyValue(kvs)
case 3:
- ptr := new([3]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [3]KeyValue(kvs)
case 4:
- ptr := new([4]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [4]KeyValue(kvs)
case 5:
- ptr := new([5]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [5]KeyValue(kvs)
case 6:
- ptr := new([6]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [6]KeyValue(kvs)
case 7:
- ptr := new([7]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [7]KeyValue(kvs)
case 8:
- ptr := new([8]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [8]KeyValue(kvs)
case 9:
- ptr := new([9]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [9]KeyValue(kvs)
case 10:
- ptr := new([10]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [10]KeyValue(kvs)
default:
return nil
}
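
To make the deprecations above concrete, here is a small sketch (not part of the diff) of the pattern the updated Set documentation recommends: construct sets with NewSet and key maps by the Distinct returned from Equivalent rather than by the Set itself.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// NewSet sorts and de-duplicates with last-value-wins semantics; the
	// deprecated NewSetWithSortable* constructors now simply forward here.
	s1 := attribute.NewSet(attribute.String("host", "a"), attribute.Int("port", 80))
	s2 := attribute.NewSet(attribute.Int("port", 80), attribute.String("host", "a"))

	// Use the Distinct returned from Equivalent, not the Set itself, as a
	// map key for stable equivalence across versions.
	counts := map[attribute.Distinct]int{}
	counts[s1.Equivalent()]++
	counts[s2.Equivalent()]++

	fmt.Println(len(counts))    // 1: both sets are equivalent
	fmt.Println(s1.Equals(&s2)) // true
}
```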
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index cb21dd5c0..9ea0ecbbd 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
@@ -242,15 +231,27 @@ func (v Value) Emit() string {
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(v.asInt64Slice())
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(v.asFloat64Slice())
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(v.asStringSlice())
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
case STRING:
return v.stringly
default:
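
A brief sketch (not part of the diff) of the observable effect of the Emit change: slice-valued attributes are now rendered as JSON arrays instead of Go's default fmt formatting.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	letters := attribute.StringSlice("letters", []string{"a", "b"})
	nums := attribute.Int64Slice("nums", []int64{1, 2, 3})

	// With this change Emit JSON-encodes slice values, so these print
	// ["a","b"] and [1,2,3] rather than the previous fmt.Sprint forms.
	fmt.Println(letters.Value.Emit())
	fmt.Println(nums.Value.Emit())
}
```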
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 000000000..7d798435e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 7d27cf77d..0e1fe2422 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/baggage"
@@ -19,6 +8,7 @@ import (
"fmt"
"net/url"
"strings"
+ "unicode/utf8"
"go.opentelemetry.io/otel/internal/baggage"
)
@@ -54,9 +44,15 @@ type Property struct {
// NewKeyProperty returns a new Property for key.
//
+// The passed key must be a valid, non-empty UTF-8 string.
// If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
func NewKeyProperty(key string) (Property, error) {
- if !validateKey(key) {
+ if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
@@ -67,11 +63,15 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value.
//
// The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
//
// Notice: Consider using [NewKeyValuePropertyRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
if !validateValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@@ -84,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
// NewKeyValuePropertyRaw returns a new Property for key with value.
//
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
- if !validateKey(key) {
+ if !validateBaggageName(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
+ if !validateBaggageValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
p := Property{
key: key,
@@ -125,12 +134,15 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
- if !validateKey(p.key) {
+ if !validateBaggageName(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}
if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value"))
}
+ if p.hasValue && !validateBaggageValue(p.value) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+ }
return nil
}
@@ -148,7 +160,15 @@ func (p Property) Value() (string, bool) {
// String encodes Property into a header string compliant with the W3C Baggage
// specification.
+// It returns an empty string if the key does not conform to the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
func (p Property) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(p.key) {
+ return ""
+ }
+
if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
}
@@ -213,9 +233,14 @@ func (p properties) validate() error {
// String encodes properties into a header string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
- props := make([]string, len(p))
- for i, prop := range p {
- props[i] = prop.String()
+ props := make([]string, 0, len(p))
+ for _, prop := range p {
+ s := prop.String()
+
+ // Ignore empty properties.
+ if s != "" {
+ props = append(props, s)
+ }
}
return strings.Join(props, propertyDelimiter)
}
@@ -232,14 +257,18 @@ type Member struct {
hasData bool
}
-// NewMemberRaw returns a new Member from the passed arguments.
+// NewMember returns a new Member from the passed arguments.
//
// The passed key must be compliant with W3C Baggage specification.
-// The passed value must be precent-encoded as defined in W3C Baggage specification.
+// The passed value must be percent-encoded as defined in W3C Baggage specification.
//
// Notice: Consider using [NewMemberRaw] instead
-// that does not require precent-encoding of the value.
+// that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
if !validateValue(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
@@ -252,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
// NewMemberRaw returns a new Member from the passed arguments.
//
-// The passed key must be compliant with W3C Baggage specification.
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{
key: key,
@@ -304,19 +339,45 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
- val := strings.TrimSpace(v)
- if !validateValue(val) {
+ rawVal := strings.TrimSpace(v)
+ if !validateValue(rawVal) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
}
- // Decode a precent-encoded value.
- value, err := url.PathUnescape(val)
+ // Decode a percent-encoded value.
+ unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+ return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
}
+
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
return Member{key: key, value: value, properties: props, hasData: true}, nil
}
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
+ if utf8.ValidString(unescapeVal) {
+ return unescapeVal
+ }
+ // W3C baggage spec:
+ // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+ var b strings.Builder
+ b.Grow(c)
+ for i := 0; i < len(unescapeVal); {
+ r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+ if r == utf8.RuneError && size == 1 {
+ // Invalid UTF-8 sequence found, replace it with '�'
+ _, _ = b.WriteString("�")
+ } else {
+ _, _ = b.WriteRune(r)
+ }
+ i += size
+ }
+
+ return b.String()
+}
+
// validate ensures m conforms to the W3C Baggage specification.
// A key must be an ASCII string, returning an error otherwise.
func (m Member) validate() error {
@@ -324,9 +385,12 @@ func (m Member) validate() error {
return fmt.Errorf("%w: %q", errInvalidMember, m)
}
- if !validateKey(m.key) {
+ if !validateBaggageName(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
+ if !validateBaggageValue(m.value) {
+ return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+ }
return m.properties.validate()
}
@@ -341,13 +405,18 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a header string compliant with the W3C Baggage
// specification.
+// It returns an empty string if the key does not conform to the W3C Baggage
+// specification. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
func (m Member) String() string {
- // A key is just an ASCII string. A value is restricted to be
- // US-ASCII characters excluding CTLs, whitespace,
- // DQUOTE, comma, semicolon, and backslash.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(m.key) {
+ return ""
+ }
+
+ s := m.key + keyValueDelimiter + valueEscape(m.value)
if len(m.properties) > 0 {
- s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+ s += propertyDelimiter + m.properties.String()
}
return s
}
@@ -458,7 +527,7 @@ func (b Baggage) Member(key string) Member {
}
// Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
+// The order of the returned list-members is not significant.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
@@ -479,8 +548,8 @@ func (b Baggage) Members() []Member {
return members
}
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
@@ -538,14 +607,22 @@ func (b Baggage) Len() int {
// String encodes Baggage into a header string compliant with the W3C Baggage
// specification.
+// Members whose keys do not conform to the W3C Baggage specification are
+// omitted from the output. This can happen for a UTF-8 key, as it may contain
+// invalid characters.
func (b Baggage) String() string {
members := make([]string, 0, len(b.list))
for k, v := range b.list {
- members = append(members, Member{
+ s := Member{
key: k,
value: v.Value,
properties: fromInternalProperties(v.Properties),
- }.String())
+ }.String()
+
+ // Ignore empty members.
+ if s != "" {
+ members = append(members, s)
+ }
}
return strings.Join(members, listDelimiter)
}
@@ -616,11 +693,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
return
}
- // Decode a precent-encoded value.
- value, err := url.PathUnescape(s[valueStart:valueEnd])
+ // Decode a percent-encoded value.
+ rawVal := s[valueStart:valueEnd]
+ unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
return
}
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
ok = true
p.key = s[keyStart:keyEnd]
@@ -641,6 +720,113 @@ func skipSpace(s string, offset int) int {
return i
}
+var safeKeyCharset = [utf8.RuneSelf]bool{
+ // 0x23 to 0x27
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+
+ // 0x30 to 0x39
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+
+ // 0x41 to 0x5a
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+
+ // 0x5e to 0x7a
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+
+ // remainder
+ '!': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '|': true,
+ '~': true,
+}
+
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+// Baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+// Baggage value is a valid UTF-8 string.
+// An empty string is also a valid UTF-8 string.
+func validateBaggageValue(s string) bool {
+ return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool {
if len(s) == 0 {
return false
@@ -656,19 +842,10 @@ func validateKey(s string) bool {
}
func validateKeyChar(c int32) bool {
- return (c >= 0x23 && c <= 0x27) ||
- (c >= 0x30 && c <= 0x39) ||
- (c >= 0x41 && c <= 0x5a) ||
- (c >= 0x5e && c <= 0x7a) ||
- c == 0x21 ||
- c == 0x2a ||
- c == 0x2b ||
- c == 0x2d ||
- c == 0x2e ||
- c == 0x7c ||
- c == 0x7e
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
}
+// validateValue checks if the string is a valid W3C Baggage value.
func validateValue(s string) bool {
for _, c := range s {
if !validateValueChar(c) {
@@ -679,12 +856,109 @@ func validateValue(s string) bool {
return true
}
+var safeValueCharset = [utf8.RuneSelf]bool{
+ '!': true, // 0x21
+
+ // 0x23 to 0x2b
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+
+ // 0x2d to 0x3a
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+
+ // 0x3c to 0x5b
+ '<': true, // 0x3C
+ '=': true, // 0x3D
+ '>': true, // 0x3E
+ '?': true, // 0x3F
+ '@': true, // 0x40
+ 'A': true, // 0x41
+ 'B': true, // 0x42
+ 'C': true, // 0x43
+ 'D': true, // 0x44
+ 'E': true, // 0x45
+ 'F': true, // 0x46
+ 'G': true, // 0x47
+ 'H': true, // 0x48
+ 'I': true, // 0x49
+ 'J': true, // 0x4A
+ 'K': true, // 0x4B
+ 'L': true, // 0x4C
+ 'M': true, // 0x4D
+ 'N': true, // 0x4E
+ 'O': true, // 0x4F
+ 'P': true, // 0x50
+ 'Q': true, // 0x51
+ 'R': true, // 0x52
+ 'S': true, // 0x53
+ 'T': true, // 0x54
+ 'U': true, // 0x55
+ 'V': true, // 0x56
+ 'W': true, // 0x57
+ 'X': true, // 0x58
+ 'Y': true, // 0x59
+ 'Z': true, // 0x5A
+ '[': true, // 0x5B
+
+ // 0x5d to 0x7e
+ ']': true, // 0x5D
+ '^': true, // 0x5E
+ '_': true, // 0x5F
+ '`': true, // 0x60
+ 'a': true, // 0x61
+ 'b': true, // 0x62
+ 'c': true, // 0x63
+ 'd': true, // 0x64
+ 'e': true, // 0x65
+ 'f': true, // 0x66
+ 'g': true, // 0x67
+ 'h': true, // 0x68
+ 'i': true, // 0x69
+ 'j': true, // 0x6A
+ 'k': true, // 0x6B
+ 'l': true, // 0x6C
+ 'm': true, // 0x6D
+ 'n': true, // 0x6E
+ 'o': true, // 0x6F
+ 'p': true, // 0x70
+ 'q': true, // 0x71
+ 'r': true, // 0x72
+ 's': true, // 0x73
+ 't': true, // 0x74
+ 'u': true, // 0x75
+ 'v': true, // 0x76
+ 'w': true, // 0x77
+ 'x': true, // 0x78
+ 'y': true, // 0x79
+ 'z': true, // 0x7A
+ '{': true, // 0x7B
+ '|': true, // 0x7C
+ '}': true, // 0x7D
+ '~': true, // 0x7E
+}
+
func validateValueChar(c int32) bool {
- return c == 0x21 ||
- (c >= 0x23 && c <= 0x2b) ||
- (c >= 0x2d && c <= 0x3a) ||
- (c >= 0x3c && c <= 0x5b) ||
- (c >= 0x5d && c <= 0x7e)
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c]
}
// valueEscape escapes the string so it can be safely placed inside a baggage value,
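
As a hedged sketch of the relaxed validation introduced above (not part of the diff): the raw constructors now accept any valid, non-empty UTF-8 key and any valid UTF-8 value, while the String methods still emit only W3C-compliant headers and silently drop members whose keys are not valid W3C tokens. The key and value literals below are illustrative only.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// The raw constructors now accept any valid, non-empty UTF-8 key and any
	// valid UTF-8 value; percent-encoding is no longer required.
	ascii, _ := baggage.NewMemberRaw("userId", "Alice in Wonderland")
	utf8Key, _ := baggage.NewMemberRaw("用户", "value") // valid UTF-8, not a W3C token

	b, _ := baggage.New(ascii, utf8Key)

	// String still emits a W3C-compliant header, so the member with the
	// non-token key is silently omitted from the output.
	fmt.Println(b.String()) // userId=Alice%20in%20Wonderland
}
```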
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
index 24b34b756..a572461a0 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/context.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
index 4545100df..b51d87cab 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/doc.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package baggage provides functionality for storing and retrieving
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
new file mode 100644
index 000000000..24c52b387
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/README.md
@@ -0,0 +1,3 @@
+# Codes
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 587ebae4e..49a35b122 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -1,21 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package codes // import "go.opentelemetry.io/otel/codes"
import (
"encoding/json"
+ "errors"
"fmt"
"strconv"
)
@@ -74,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return nil
}
if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+ return errors.New("nil receiver passed to UnmarshalJSON")
}
var x interface{}
@@ -94,7 +84,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return fmt.Errorf("invalid code: %q", ci)
}
- *c = Code(ci)
+ *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
return nil
}
return fmt.Errorf("invalid code: %q", string(b))
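
For reference, a small sketch (not part of the diff) exercising the UnmarshalJSON path touched above; it assumes only the package's documented string names and numeric values.

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	var c codes.Code

	// Both the canonical string names and their numeric values unmarshal.
	_ = json.Unmarshal([]byte(`"Error"`), &c)
	fmt.Println(c == codes.Error) // true

	_ = json.Unmarshal([]byte(`0`), &c)
	fmt.Println(c == codes.Unset) // true
}
```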
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
index 4e328fbb4..ee8db448b 100644
--- a/vendor/go.opentelemetry.io/otel/codes/doc.go
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package codes defines the canonical error codes used by OpenTelemetry.
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index 36d7c24e8..921f85961 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package otel provides global access to the OpenTelemetry API. The subpackages of
@@ -28,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
To read more about metrics, see go.opentelemetry.io/otel/metric.
+To read more about logs, see go.opentelemetry.io/otel/log.
+
To read more about propagation, see go.opentelemetry.io/otel/propagation and
go.opentelemetry.io/otel/baggage.
*/
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
index 72fad8541..67414c71e 100644
--- a/vendor/go.opentelemetry.io/otel/error_handler.go
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
index 9a58fb1d3..93e80ea30 100644
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -1,18 +1,7 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
index 4115fe3bb..07623b679 100644
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
@@ -18,12 +7,8 @@ import (
"go.opentelemetry.io/otel/internal/global"
)
-var (
- // Compile-time check global.ErrDelegator implements ErrorHandler.
- _ ErrorHandler = (*global.ErrDelegator)(nil)
- // Compile-time check global.ErrLogger implements ErrorHandler.
- _ ErrorHandler = (*global.ErrLogger)(nil)
-)
+// Compile-time check global.ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*global.ErrDelegator)(nil)
// GetErrorHandler returns the global ErrorHandler instance.
//
@@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) { global.Handle(err) }
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
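
A minimal sketch (not part of the diff) of the error-handler API whose Handle convenience function now routes explicitly through GetErrorHandler(); it assumes the package's ErrorHandlerFunc adapter.

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Register a handler for errors the API and SDK cannot surface otherwise.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel error: %v", err)
	}))

	// Handle is now an explicit shorthand for GetErrorHandler().Handle(err).
	otel.Handle(errors.New("example delegated error"))
}
```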
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index 622c3ee3f..691d96c75 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package attribute provide several helper functions for some commonly used
@@ -25,33 +14,33 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} {
var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} {
var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} {
var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} {
var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
@@ -60,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero bool
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+ cpy := make([]bool, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
@@ -74,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero int64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+ cpy := make([]int64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
@@ -88,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero float64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+ cpy := make([]float64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsStringSlice converts a string array into a slice into with same elements as array.
@@ -102,10 +88,9 @@ func AsStringSlice(v interface{}) []string {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero string
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+ cpy := make([]string, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
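
For context, the rewritten attribute helpers build a fixed-size array type with reflect.ArrayOf, fill it with reflect.Copy, and convert back to a slice with a plain make plus reflect.Copy. A minimal standalone sketch of that round-trip (hypothetical names, standard library only, not the vendored package):

```go
package main

import (
	"fmt"
	"reflect"
)

// sliceToArray mirrors the Int64SliceValue pattern: it returns an interface{}
// holding a comparable [N]int64 array with the same elements as the slice.
func sliceToArray(v []int64) interface{} {
	var zero int64
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

// arrayToSlice mirrors AsInt64Slice: it copies an array value back into a
// freshly allocated slice.
func arrayToSlice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	arr := sliceToArray([]int64{1, 2, 3})
	fmt.Printf("%T %v\n", arr, arr) // [3]int64 [1 2 3]
	fmt.Println(arrayToSlice(arr))  // [1 2 3]
}
```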
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
index b96e5408e..b4f85f44a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package baggage provides base types and functionality to store and retrieve
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
index 4469700d9..3aea9c491 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package baggage // import "go.opentelemetry.io/otel/internal/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
index f532f07e9..4259f0320 100644
--- a/vendor/go.opentelemetry.io/otel/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/internal/gen.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
index 5e9b83047..c657ff8e7 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -1,38 +1,13 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"log"
- "os"
"sync/atomic"
)
-var (
- // GlobalErrorHandler provides an ErrorHandler that can be used
- // throughout an OpenTelemetry instrumented project. When a user
- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
- // `Handle` and will be delegated to the registered ErrorHandler.
- GlobalErrorHandler = defaultErrorHandler()
-
- // Compile-time check that delegator implements ErrorHandler.
- _ ErrorHandler = (*ErrDelegator)(nil)
- // Compile-time check that errLogger implements ErrorHandler.
- _ ErrorHandler = (*ErrLogger)(nil)
-)
-
// ErrorHandler handles irremediable events.
type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry
@@ -44,59 +19,18 @@ type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler]
}
-func (d *ErrDelegator) Handle(err error) {
- d.getDelegate().Handle(err)
-}
+// Compile-time check that delegator implements ErrorHandler.
+var _ ErrorHandler = (*ErrDelegator)(nil)
-func (d *ErrDelegator) getDelegate() ErrorHandler {
- return *d.delegate.Load()
+func (d *ErrDelegator) Handle(err error) {
+ if eh := d.delegate.Load(); eh != nil {
+ (*eh).Handle(err)
+ return
+ }
+ log.Print(err)
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh)
}
-
-func defaultErrorHandler() *ErrDelegator {
- d := &ErrDelegator{}
- d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
- return d
-}
-
-// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
-type ErrLogger struct {
- l *log.Logger
-}
-
-// Handle logs err if no delegate is set, otherwise it is delegated.
-func (h *ErrLogger) Handle(err error) {
- h.l.Print(err)
-}
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler {
- return GlobalErrorHandler
-}
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) {
- GlobalErrorHandler.setDelegate(h)
-}
-
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) {
- GetErrorHandler().Handle(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index ebb13c206..ae92a4251 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -24,7 +13,7 @@ import (
// unwrapper unwraps to return the underlying instrument implementation.
type unwrapper interface {
- Unwrap() metric.Observable
+ unwrap() metric.Observable
}
type afCounter struct {
@@ -51,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afCounter) Unwrap() metric.Observable {
+func (i *afCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableCounter)
}
@@ -82,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afUpDownCounter) Unwrap() metric.Observable {
+func (i *afUpDownCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableUpDownCounter)
}
@@ -113,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *afGauge) Unwrap() metric.Observable {
+func (i *afGauge) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableGauge)
}
@@ -144,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiCounter) Unwrap() metric.Observable {
+func (i *aiCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableCounter)
}
@@ -175,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiUpDownCounter) Unwrap() metric.Observable {
+func (i *aiUpDownCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableUpDownCounter)
}
@@ -206,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
-func (i *aiGauge) Unwrap() metric.Observable {
+func (i *aiGauge) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableGauge)
}
@@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
+type sfGauge struct {
+ embedded.Float64Gauge
+
+ name string
+ opts []metric.Float64GaugeOption
+
+ delegate atomic.Value // metric.Float64Gauge
+}
+
+var _ metric.Float64Gauge = (*sfGauge)(nil)
+
+func (i *sfGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
+ }
+}
+
type siCounter struct {
embedded.Int64Counter
@@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
}
}
+
+type siGauge struct {
+ embedded.Int64Gauge
+
+ name string
+ opts []metric.Int64GaugeOption
+
+ delegate atomic.Value // metric.Int64Gauge
+}
+
+var _ metric.Int64Gauge = (*siGauge)(nil)
+
+func (i *siGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
+ }
+}
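
The new siGauge/sfGauge placeholders follow the same delegation pattern as the other global instruments: Record is a no-op until setDelegate stores a real instrument. A standalone sketch of that pattern with hypothetical types (not the vendored ones):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// recorder stands in for metric.Float64Gauge in this sketch.
type recorder interface{ Record(float64) }

// lazyGauge mirrors sfGauge: it buffers nothing and simply drops records
// until a delegate has been installed.
type lazyGauge struct {
	delegate atomic.Value // recorder
}

func (g *lazyGauge) setDelegate(r recorder) { g.delegate.Store(r) }

func (g *lazyGauge) Record(x float64) {
	if r := g.delegate.Load(); r != nil {
		r.(recorder).Record(x)
	}
}

type printGauge struct{}

func (printGauge) Record(x float64) { fmt.Println("recorded", x) }

func main() {
	var g lazyGauge
	g.Record(1.0)               // dropped: no delegate yet
	g.setDelegate(printGauge{}) // e.g. an SDK gets installed
	g.Record(2.0)               // forwarded to the real instrument
}
```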
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index c6f305a2b..adbca7d34 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -23,17 +12,20 @@ import (
"github.com/go-logr/stdr"
)
-// globalLogger is the logging interface used within the otel api and sdk provide details of the internals.
+// globalLogger holds a reference to the [logr.Logger] used within
+// go.opentelemetry.io/otel.
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
-var globalLogger atomic.Pointer[logr.Logger]
+var globalLogger = func() *atomic.Pointer[logr.Logger] {
+ l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
-func init() {
- SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
-}
+ p := new(atomic.Pointer[logr.Logger])
+ p.Store(&l)
+ return p
+}()
-// SetLogger overrides the globalLogger with l.
+// SetLogger sets the global Logger to l.
//
// To see Warn messages use a logger with `l.V(1).Enabled() == true`
// To see Info messages use a logger with `l.V(4).Enabled() == true`
@@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) {
globalLogger.Store(&l)
}
-func getLogger() logr.Logger {
+// GetLogger returns the global logger.
+func GetLogger() logr.Logger {
return *globalLogger.Load()
}
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
- getLogger().V(4).Info(msg, keysAndValues...)
+ GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
- getLogger().Error(err, msg, keysAndValues...)
+ GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
- getLogger().V(8).Info(msg, keysAndValues...)
+ GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
func Warn(msg string, keysAndValues ...interface{}) {
- getLogger().V(1).Info(msg, keysAndValues...)
+ GetLogger().V(1).Info(msg, keysAndValues...)
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index 7ed61c0e2..a6acd8dca 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -1,23 +1,13 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"container/list"
+ "context"
+ "reflect"
"sync"
- "sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
@@ -76,6 +66,8 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
key := il{
name: name,
version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
}
if p.meters == nil {
@@ -86,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
return val
}
- t := &meter{name: name, opts: opts}
+ t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
p.meters[key] = t
return t
}
@@ -102,17 +94,29 @@ type meter struct {
opts []metric.MeterOption
mtx sync.Mutex
- instruments []delegatedInstrument
+ instruments map[instID]delegatedInstrument
registry list.List
- delegate atomic.Value // metric.Meter
+ delegate metric.Meter
}
type delegatedInstrument interface {
setDelegate(metric.Meter)
}
+// instID are the identifying properties of a instrument.
+type instID struct {
+ // name is the name of the stream.
+ name string
+ // description is the description of the stream.
+ description string
+ // kind defines the functional group of the instrument.
+ kind reflect.Type
+ // unit is the unit of the stream.
+ unit string
+}
+
// setDelegate configures m to delegate all Meter functionality to Meters
// created by provider.
//
@@ -120,12 +124,12 @@ type delegatedInstrument interface {
//
// It is guaranteed by the caller that this happens only once.
func (m *meter) setDelegate(provider metric.MeterProvider) {
- meter := provider.Meter(m.name, m.opts...)
- m.delegate.Store(meter)
-
m.mtx.Lock()
defer m.mtx.Unlock()
+ meter := provider.Meter(m.name, m.opts...)
+ m.delegate = meter
+
for _, inst := range m.instruments {
inst.setDelegate(meter)
}
@@ -143,147 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
}
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64Counter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Counter(name, options...)
+ }
+
+ cfg := metric.NewInt64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Counter), nil
+ }
i := &siCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64UpDownCounter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64UpDownCounter), nil
+ }
i := &siUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64Histogram(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Histogram(name, options...)
+ }
+
+ cfg := metric.NewInt64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Histogram), nil
+ }
i := &siHistogram{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
-func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableCounter(name, options...)
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Gauge(name, options...)
}
+
+ cfg := metric.NewInt64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Gauge), nil
+ }
+ i := &siGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableCounter), nil
+ }
i := &aiCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableUpDownCounter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableUpDownCounter), nil
+ }
i := &aiUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableGauge(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableGauge), nil
+ }
i := &aiGauge{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64Counter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Counter(name, options...)
+ }
+
+ cfg := metric.NewFloat64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Counter), nil
+ }
i := &sfCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64UpDownCounter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64UpDownCounter), nil
+ }
i := &sfUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64Histogram(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Histogram(name, options...)
+ }
+
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Histogram), nil
+ }
i := &sfHistogram{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
-func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableCounter(name, options...)
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Gauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
}
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Gauge), nil
+ }
+ i := &sfGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableCounter), nil
+ }
i := &afCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableUpDownCounter(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableUpDownCounter), nil
+ }
i := &afUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableGauge(name, options...)
- }
m.mtx.Lock()
defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableGauge), nil
+ }
i := &afGauge{name: name, opts: options}
- m.instruments = append(m.instruments, i)
+ m.instruments[id] = i
return i, nil
}
// RegisterCallback captures the function that will be called during Collect.
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- insts = unwrapInstruments(insts)
- return del.RegisterCallback(f, insts...)
- }
-
m.mtx.Lock()
defer m.mtx.Unlock()
+ if m.delegate != nil {
+ return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
+ }
+
reg := &registration{instruments: insts, function: f}
e := m.registry.PushBack(reg)
reg.unreg = func() error {
@@ -295,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
return reg, nil
}
-type wrapped interface {
- unwrap() metric.Observable
-}
-
func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
out := make([]metric.Observable, 0, len(instruments))
for _, inst := range instruments {
- if in, ok := inst.(wrapped); ok {
+ if in, ok := inst.(unwrapper); ok {
out = append(out, in.unwrap())
} else {
out = append(out, inst)
@@ -323,9 +512,61 @@ type registration struct {
unregMu sync.Mutex
}
-func (c *registration) setDelegate(m metric.Meter) {
- insts := unwrapInstruments(c.instruments)
+type unwrapObs struct {
+ embedded.Observer
+ obs metric.Observer
+}
+// unwrapFloat64Observable returns an expected metric.Float64Observable after
+// unwrapping the global object.
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
+ // Note: if the unwrapped object does not
+ // unwrap as an observable for either of the
+ // predicates here, it means an internal bug in
+ // this package. We avoid logging an error in
+ // this case, because the SDK has to try its
+ // own type conversion on the object. The SDK
+ // will see this and be forced to respond with
+ // its own error.
+ //
+ // This code uses a double-nested if statement
+ // to avoid creating a branch that is
+ // impossible to cover.
+ inst = floatObs
+ }
+ }
+ return inst
+}
+
+// unwrapInt64Observable returns an expected metric.Int64Observable after
+// unwrapping the global object.
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
+ // See the comment in unwrapFloat64Observable().
+ inst = unint
+ }
+ }
+ return inst
+}
+
+func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
+}
+
+func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
+}
+
+func unwrapCallback(f metric.Callback) metric.Callback {
+ return func(ctx context.Context, obs metric.Observer) error {
+ return f(ctx, &unwrapObs{obs: obs})
+ }
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
c.unregMu.Lock()
defer c.unregMu.Unlock()
@@ -334,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) {
return
}
- reg, err := m.RegisterCallback(c.function, insts...)
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
if err != nil {
GetErrorHandler().Handle(err)
+ return
}
c.unreg = reg.Unregister
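
Instead of an ever-growing slice, the placeholder meter now keys cached instruments by name, description, unit, and kind, so identical requests made before an SDK is installed resolve to one placeholder. A standalone sketch of that caching shape (hypothetical types, not the vendored ones):

```go
package main

import (
	"fmt"
	"reflect"
)

// instKey mirrors instID: the identifying properties of an instrument.
type instKey struct {
	name, description, unit string
	kind                    reflect.Type
}

type counter struct{ name string }

type registry struct {
	instruments map[instKey]*counter
}

// int64Counter returns the cached placeholder for an identical request,
// or creates and stores a new one.
func (r *registry) int64Counter(name, desc, unit string) *counter {
	k := instKey{name: name, description: desc, unit: unit, kind: reflect.TypeOf((*counter)(nil))}
	if c, ok := r.instruments[k]; ok {
		return c
	}
	c := &counter{name: name}
	r.instruments[k] = c
	return c
}

func main() {
	r := &registry{instruments: make(map[instKey]*counter)}
	a := r.int64Counter("requests", "handled requests", "{request}")
	b := r.int64Counter("requests", "handled requests", "{request}")
	fmt.Println(a == b) // true: the same placeholder is returned
}
```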
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
index 06bac35c2..38560ff99 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
index 386c8bfdc..204ea142a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -25,6 +14,10 @@ import (
)
type (
+ errorHandlerHolder struct {
+ eh ErrorHandler
+ }
+
tracerProviderHolder struct {
tp trace.TracerProvider
}
@@ -39,15 +32,59 @@ type (
)
var (
+ globalErrorHandler = defaultErrorHandler()
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalMeterProvider = defaultMeterProvider()
+ delegateErrorHandlerOnce sync.Once
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
delegateMeterOnce sync.Once
)
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+ return globalErrorHandler.Load().(errorHandlerHolder).eh
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+ current := GetErrorHandler()
+
+ if _, cOk := current.(*ErrDelegator); cOk {
+ if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+ // Do not assign to the delegate of the default ErrDelegator to be
+ // itself.
+ Error(
+ errors.New("no ErrorHandler delegate configured"),
+ "ErrorHandler remains its current value.",
+ )
+ return
+ }
+ }
+
+ delegateErrorHandlerOnce.Do(func() {
+ if def, ok := current.(*ErrDelegator); ok {
+ def.setDelegate(h)
+ }
+ })
+ globalErrorHandler.Store(errorHandlerHolder{eh: h})
+}
+
// TracerProvider is the internal implementation for global.TracerProvider.
func TracerProvider() trace.TracerProvider {
return globalTracer.Load().(tracerProviderHolder).tp
@@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) {
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
+func defaultErrorHandler() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
+ return v
+}
+
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})
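
With the error-handler state moved next to the other globals, the documented delegate-once behaviour is unchanged from a caller's point of view: handlers obtained before SetErrorHandler forward to whatever is installed later. A short sketch against the public otel package:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// A handler returned before SetErrorHandler delegates to the handler
	// that is eventually installed.
	early := otel.GetErrorHandler()

	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		fmt.Println("otel error:", err)
	}))

	early.Handle(errors.New("boom")) // forwarded to the handler set above
}
```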
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 3f61ec12a..8982aa0dc 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package global // import "go.opentelemetry.io/otel/internal/global"
@@ -36,6 +25,7 @@ import (
"sync"
"sync/atomic"
+ "go.opentelemetry.io/auto/sdk"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
@@ -97,6 +87,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
key := il{
name: name,
version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
}
if p.tracers == nil {
@@ -115,6 +107,8 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
type il struct {
name string
version string
+ schema string
+ attrs attribute.Set
}
// tracer is a placeholder for a trace.Tracer.
@@ -152,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
return delegate.(trace.Tracer).Start(ctx, name, opts...)
}
+ return t.newSpan(ctx, autoInstEnabled, name, opts)
+}
+
+// autoInstEnabled determines if the auto-instrumentation SDK span is returned
+// from the tracer when not backed by a delegate and auto-instrumentation has
+// attached to this process.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches. By default, this will point to false and mean a tracer will return
+// a nonRecordingSpan by default.
+var autoInstEnabled = new(bool)
+
+func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) {
+ // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
+ // so the auto-instrumentation can define a uprobe for (*t).newSpan and be
+ // provided with the address of the bool autoInstEnabled points to. It
+ // needs to be a parameter so that pointer can be reliably determined, it
+ // should not be read from the global.
+
+ if *autoSpan {
+ tracer := sdk.TracerProvider().Tracer(t.name, t.opts...)
+ return tracer.Start(ctx, name, opts...)
+ }
+
s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
ctx = trace.ContextWithSpan(ctx, s)
return ctx, s
@@ -193,6 +211,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
// AddEvent does nothing.
func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
// SetName does nothing.
func (nonRecordingSpan) SetName(string) {}
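
Since autoInstEnabled defaults to false, a global tracer with no delegate and no attached auto-instrumentation still hands back non-recording spans, as before. A small sketch of what callers observe in that state (behaviour as described in the comments above; not an exercise of the auto-instrumentation path):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// With no SDK installed, the global tracer returns non-recording spans
	// that still carry whatever span context is already in the context.
	tr := otel.Tracer("example")
	ctx, span := tr.Start(context.Background(), "op")
	defer span.End()

	fmt.Println(span.IsRecording())           // false
	fmt.Println(span.SpanContext().IsValid()) // false: nothing upstream
	_ = ctx
}
```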
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index e07e79400..b2fe3e41d 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
@@ -31,11 +20,13 @@ func RawToBool(r uint64) bool {
}
func Int64ToRaw(i int64) uint64 {
- return uint64(i)
+ // Assumes original was a valid int64 (overflow not checked).
+ return uint64(i) // nolint: gosec
}
func RawToInt64(r uint64) int64 {
- return int64(r)
+ // Assumes original was a valid int64 (overflow not checked).
+ return int64(r) // nolint: gosec
}
func Float64ToRaw(f float64) uint64 {
@@ -47,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
}
func RawPtrToFloat64Ptr(r *uint64) *float64 {
- return (*float64)(unsafe.Pointer(r))
+ // Assumes original was a valid *float64 (overflow not checked).
+ return (*float64)(unsafe.Pointer(r)) // nolint: gosec
}
func RawPtrToInt64Ptr(r *uint64) *int64 {
- return (*int64)(unsafe.Pointer(r))
+ // Assumes original was a valid *int64 (overflow not checked).
+ return (*int64)(unsafe.Pointer(r)) // nolint: gosec
}
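
The raw helpers pack both int64 and float64 measurements into a uint64; floats round-trip through their IEEE-754 bit pattern, ints through a plain conversion (hence the added gosec overflow notes). A minimal sketch of both round trips using only the standard library:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// float64 <-> raw uint64 via the IEEE-754 bit pattern.
	f := 3.75
	raw := math.Float64bits(f)
	fmt.Println(math.Float64frombits(raw) == f) // true

	// int64 <-> raw uint64 via a two's-complement conversion.
	i := int64(-42)
	fmt.Println(int64(uint64(i)) == i) // true
}
```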
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
index c4f8acd5d..6de7f2e4d 100644
--- a/vendor/go.opentelemetry.io/otel/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
index f95517195..1e6473b32 100644
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 000000000..0cf902e01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index 072baa8e8..f8435d8f2 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -50,7 +39,7 @@ type Float64ObservableCounter interface {
}
// Float64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableCounterConfig struct {
description string
unit string
@@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface {
}
// Float64ObservableUpDownCounterConfig contains options for asynchronous
-// counter instruments that record int64 values.
+// counter instruments that record float64 values.
type Float64ObservableUpDownCounterConfig struct {
description string
unit string
@@ -165,7 +154,7 @@ type Float64ObservableGauge interface {
}
// Float64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64ObservableGaugeConfig struct {
description string
unit string
@@ -224,7 +213,7 @@ type Float64Observer interface {
}
// Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observerable instrument it is registered with.
+// observations for a Float64Observable instrument it is registered with.
// Calls to the Float64Observer record measurement values for the
// Float64Observable.
//
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
index 9bd6ebf02..e079aaef1 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -223,7 +212,7 @@ type Int64Observer interface {
}
// Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observerable instrument it is registered with. Calls to the
+// for an Int64Observable instrument it is registered with. Calls to the
// Int64Observer record measurement values for the Int64Observable.
//
// The function needs to complete in a finite amount of time and the deadline
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index 778ad2d74..d9e3b13e4 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
index 54716e13b..f153745b0 100644
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package metric provides the OpenTelemetry API used to measure metrics about
@@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and
See the [OpenTelemetry documentation] for more information about instruments
and their intended use.
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+ - Not be empty.
+ - Have an alphabetic character as their first letter.
+ - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+ ‘-’, or ‘/’.
+ - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will validate
+these names, it is the callers responsibility to ensure compliance.
+
# Measurements
Measurements are made by recording values and information about the values with
@@ -164,6 +170,7 @@ It is strongly recommended that authors only embed
That implementation is the only one OpenTelemetry authors can guarantee will
fully implement all the API interfaces when a user updates their API.
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
*/
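
The new "Instrument Name" section lists the syntax rules but notes that not every API implementation validates them. One way to express the documented constraints as a check on the caller's side is a simple regular expression; a sketch (the pattern below is an interpretation of the rules, not part of the library):

```go
package main

import (
	"fmt"
	"regexp"
)

// instrumentName: an alphabetic first character, then up to 254 more
// characters drawn from letters, digits, '_', '.', '-' and '/'.
var instrumentName = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

func main() {
	for _, name := range []string{"http.server.request.duration", "1bad", ""} {
		fmt.Println(name, instrumentName.MatchString(name))
	}
}
```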
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000..1f6e0efa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
index ae0bdbd2e..1a9dc6809 100644
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package embedded provides interfaces embedded within the [OpenTelemetry
// metric API].
@@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() }
// the API package).
type Float64Histogram interface{ float64Histogram() }
+// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64Gauge interface{ float64Gauge() }
+
// Float64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
//
@@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() }
// the API package).
type Int64Histogram interface{ int64Histogram() }
+// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
+// a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64Gauge interface{ int64Gauge() }
+
// Int64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
//
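
The new embedded.Float64Gauge/Int64Gauge interfaces exist so third-party implementations break loudly (with a compile error pointing at the embed) rather than silently when the metric API grows. A minimal sketch of an implementation that embeds the new interface, with hypothetical names:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
)

// printGauge embeds embedded.Float64Gauge so it stops compiling here, rather
// than breaking callers at runtime, if metric.Float64Gauge gains new methods.
type printGauge struct {
	embedded.Float64Gauge
}

func (printGauge) Record(_ context.Context, v float64, _ ...metric.RecordOption) {
	fmt.Println("gauge:", v)
}

var _ metric.Float64Gauge = printGauge{}

func main() {
	var g metric.Float64Gauge = printGauge{}
	g.Record(context.Background(), 1.5)
}
```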
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index be89cd533..a535782e1 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -27,6 +16,7 @@ type InstrumentOption interface {
Int64CounterOption
Int64UpDownCounterOption
Int64HistogramOption
+ Int64GaugeOption
Int64ObservableCounterOption
Int64ObservableUpDownCounterOption
Int64ObservableGaugeOption
@@ -34,6 +24,7 @@ type InstrumentOption interface {
Float64CounterOption
Float64UpDownCounterOption
Float64HistogramOption
+ Float64GaugeOption
Float64ObservableCounterOption
Float64ObservableUpDownCounterOption
Float64ObservableGaugeOption
@@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.description = string(o)
return c
@@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.description = string(o)
return c
@@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra
return c
}
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.unit = string(o)
return c
@@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi
return c
}
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.unit = string(o)
return c
@@ -340,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
//
// cp := make([]attribute.KeyValue, len(attributes))
// copy(cp, attributes)
-// WithAttributes(attribute.NewSet(cp...))
+// WithAttributeSet(attribute.NewSet(cp...))
//
// [attribute.NewSet] may modify the passed attributes so this will make a copy
// of attributes before creating a set in order to ensure this function is
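The doc-comment correction above (WithAttributes changed to WithAttributeSet in the example) matters because the two options differ in cost: WithAttributeSet reuses a pre-built attribute.Set, while WithAttributes constructs a set on each call. A hedged sketch of both, with an instrument name and attribute invented for illustration:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example") // global meter; a no-op unless an SDK is installed

	counter, err := meter.Float64Counter("requests.total")
	if err != nil {
		panic(err)
	}

	// Hot path: build the attribute set once and reuse it.
	set := attribute.NewSet(attribute.String("route", "/healthz"))
	counter.Add(ctx, 1, metric.WithAttributeSet(set))

	// Convenience path: WithAttributes builds the set on every call.
	counter.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/healthz")))
}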
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 2520bc74a..14e08c24a 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -58,17 +47,41 @@ type Meter interface {
// Int64Counter returns a new Int64Counter instrument identified by name
// and configured with options. The instrument is used to synchronously
// record increasing int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational
// operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
// Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
+ // Int64Gauge returns a new Int64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
@@ -78,7 +91,12 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record int64 measurements once per
@@ -88,7 +106,12 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous int64 measurements once per a
@@ -98,23 +121,51 @@ type Meter interface {
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
// Float64Counter returns a new Float64Counter instrument identified by
// name and configured with options. The instrument is used to
// synchronously record increasing float64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational
// operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
// Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a
// computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
+ // Float64Gauge returns a new Float64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous float64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
@@ -124,7 +175,12 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
// Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and
// configured with options. The instrument is used to asynchronously record
@@ -134,7 +190,12 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous float64 measurements once per a
@@ -144,6 +205,10 @@ type Meter interface {
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
// RegisterCallback registers f to be called during the collection of a
@@ -189,6 +254,7 @@ type Observer interface {
// ObserveFloat64 records the float64 value for obsrv.
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
// ObserveInt64 records the int64 value for obsrv.
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
}
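The new Int64Gauge and Float64Gauge factory methods added to Meter above are the synchronous counterparts to the existing observable gauges: the value is recorded where it is known instead of via a registered callback. A minimal, hedged sketch (instrument name, description, and unit are illustrative):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example")

	gauge, err := meter.Int64Gauge(
		"queue.depth",
		metric.WithDescription("Current number of queued items."),
		metric.WithUnit("{item}"),
	)
	if err != nil {
		panic(err)
	}

	// Record the instantaneous value at the point it is observed.
	gauge.Record(ctx, 42)
}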
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 000000000..bb8969435
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
index acc9a670b..ca6fcbdc0 100644
--- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package noop provides an implementation of the OpenTelemetry metric API that
// produces no telemetry and minimizes used computation resources.
@@ -43,6 +32,8 @@ var (
_ metric.Float64UpDownCounter = Float64UpDownCounter{}
_ metric.Int64Histogram = Int64Histogram{}
_ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
_ metric.Int64ObservableCounter = Int64ObservableCounter{}
_ metric.Float64ObservableCounter = Float64ObservableCounter{}
_ metric.Int64ObservableGauge = Int64ObservableGauge{}
@@ -87,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6
return Int64Histogram{}, nil
}
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
// Int64ObservableCounter returns an ObservableCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
@@ -123,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.
return Float64Histogram{}, nil
}
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
// Float64ObservableCounter returns an ObservableCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
@@ -208,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram }
// Record performs no operation.
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableCounter struct {
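The noop additions above keep noop.Meter a complete metric.Meter implementation now that the API includes synchronous gauges. A hedged sketch of using the noop provider as a do-nothing default, for example in tests:

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	ctx := context.Background()

	// Everything below compiles and runs but records no telemetry.
	mp := noop.NewMeterProvider()
	meter := mp.Meter("example")

	gauge, err := meter.Float64Gauge("cpu.temperature")
	if err != nil {
		panic(err)
	}
	gauge.Record(ctx, 71.5)
}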
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
index 0a4825ae6..8403a4bad 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -39,7 +28,7 @@ type Float64Counter interface {
}
// Float64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
+// record float64 values.
type Float64CounterConfig struct {
description string
unit string
@@ -92,7 +81,7 @@ type Float64UpDownCounter interface {
}
// Float64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
+// instruments that record float64 values.
type Float64UpDownCounterConfig struct {
description string
unit string
@@ -144,8 +133,8 @@ type Float64Histogram interface {
Record(ctx context.Context, incr float64, options ...RecordOption)
}
-// Float64HistogramConfig contains options for synchronous counter instruments
-// that record int64 values.
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
type Float64HistogramConfig struct {
description string
unit string
@@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Float64HistogramOption interface {
applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
}
+
+// Float64Gauge is an instrument that records instantaneous float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+ var config Float64GaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+ applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
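NewFloat64GaugeConfig above is aimed at SDK authors rather than end users: a Meter implementation resolves the options passed to Float64Gauge into a config and reads the description and unit from it. A hedged sketch of that pattern with an invented instrument type:

package mysdk // hypothetical SDK package

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
)

// sdkFloat64Gauge is an invented instrument type used only to show how the
// config accessors are consumed.
type sdkFloat64Gauge struct {
	embedded.Float64Gauge

	name, description, unit string
}

func (*sdkFloat64Gauge) Record(context.Context, float64, ...metric.RecordOption) {
	// A real SDK would aggregate the recorded value here.
}

// newFloat64Gauge resolves the options into a Float64GaugeConfig and copies
// the configured description and unit onto the instrument.
func newFloat64Gauge(name string, options ...metric.Float64GaugeOption) *sdkFloat64Gauge {
	cfg := metric.NewFloat64GaugeConfig(options...)
	return &sdkFloat64Gauge{
		name:        name,
		description: cfg.Description(),
		unit:        cfg.Unit(),
	}
}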
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 56667d32f..783fdfba7 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package metric // import "go.opentelemetry.io/otel/metric"
@@ -144,7 +133,7 @@ type Int64Histogram interface {
Record(ctx context.Context, incr int64, options ...RecordOption)
}
-// Int64HistogramConfig contains options for synchronous counter instruments
+// Int64HistogramConfig contains options for synchronous histogram instruments
// that record int64 values.
type Int64HistogramConfig struct {
description string
@@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
type Int64HistogramOption interface {
applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+ var config Int64GaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Int64GaugeOption applies options to a [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+ applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
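Int64Gauge mirrors the float64 version above, and its Record doc comment points at WithAttributeSet (or WithAttributes) for measurement attributes. A hedged sketch with an invented instrument and attribute:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example")

	depth, err := meter.Int64Gauge("queue.depth", metric.WithUnit("{item}"))
	if err != nil {
		panic(err)
	}

	// Pre-built attribute set, as the Record doc comment recommends for
	// performance-sensitive call sites.
	partition := attribute.NewSet(attribute.Int("partition", 3))
	depth.Record(ctx, 17, metric.WithAttributeSet(partition))
}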
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
index d29aaa32c..2fd949733 100644
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 000000000..e2959ac74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index 303cdf1cb..552263ba7 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
index c119eb285..33a3baf15 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/doc.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package propagation contains OpenTelemetry context propagators.
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index c94438f73..8c8286aab 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 63e5d6222..6870e316d 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package propagation // import "go.opentelemetry.io/otel/propagation"
@@ -46,7 +35,7 @@ var (
versionPart = fmt.Sprintf("%.2X", supportedVersion)
)
-// Inject set tracecontext from the Context into the carrier.
+// Inject injects the trace context from ctx into carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() {
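The Inject doc-comment fix above concerns the W3C traceparent/tracestate propagator. A hedged sketch of wiring it up and carrying a span context across an HTTP request (endpoint URL is illustrative; with no active span in the context, Inject is a no-op):

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	)

	// Client side: inject the current span context into outgoing headers.
	req, _ := http.NewRequest(http.MethodGet, "http://localhost:8080/hello", nil)
	prop.Inject(context.Background(), propagation.HeaderCarrier(req.Header))

	// Server side: extract it back out of the incoming headers.
	ctx := prop.Extract(context.Background(), propagation.HeaderCarrier(req.Header))
	_ = ctx // pass ctx to downstream work so spans are parented correctly
}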
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 000000000..4f80c898a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,26 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "ignorePaths": [],
+ "labels": ["Skip Changelog", "dependencies"],
+ "postUpdateOptions" : [
+ "gomodTidy"
+ ],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": true
+ },
+ {
+ "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+ "groupName": "googleapis"
+ },
+ {
+ "matchPackageNames": ["golang.org/x/**"],
+ "groupName": "golang.org/x"
+ }
+ ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index e0a43e138..ab09daf9d 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.2.6
+codespell==2.3.0
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
new file mode 100644
index 000000000..87b842c5d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
index 71a1f7748..e087c9c04 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
index 679c40c4d..c7b804bbe 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
index 9b8c559de..137acc67d 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
index d5c4b5c13..d318221e5 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
index 39a2eab3a..7e365e82c 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
index 42fc525d1..634a1dce0 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
index 8c4a7299d..21497bb6b 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 000000000..82e1f46b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
index 67d1d4c44..6685c392b 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
index 359c5a696..0d1f55a8f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
index 8ac9350d2..637763932 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
index 09ff4dfdb..f40c97825 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
index 342aede95..9c1840631 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
index a2b906742..3d44dae27 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
index e449e5c3b..95d0210e3 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
index 851774148..90b1b0452 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Code generated from semantic convention specification. DO NOT EDIT.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000..2de1fc3c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
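The new semconv/v1.26.0 package that starts below exposes typed helpers for semantic-convention attributes, such as the AWSRequestID and AspnetcoreRateLimitingPolicy functions further down. A hedged sketch of attaching one to a span (tracer and span names are illustrative; the request ID value is taken from the examples in the generated file):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	tracer := otel.Tracer("example")

	_, span := tracer.Start(context.Background(), "PutItem")
	defer span.End()

	// Typed helpers return attribute.KeyValue with the conventional key
	// names, e.g. "aws.request_id".
	span.SetAttributes(semconv.AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"))
}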
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000..d8dc822b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result, shows whether the lease was acquired or
+ // contains a rejection reason
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+ // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+ // It represents the aSP.NET Core exception middleware handling result
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'handled', 'unhandled'
+ AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+ // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+ // "aspnetcore.routing.match_status" semantic conventions. It represents
+ // the match result - success or failure
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'success', 'failure'
+ AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+ // Exception was handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+ // Exception was not handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+ // Exception handling was skipped because the response had started
+ AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+ // Exception handling didn't run because the request was aborted
+ AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+ // Match succeeded
+ AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+ // Match failed
+ AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// Generic attributes for AWS services.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes for AWS DynamoDB.
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
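+
+// exampleDynamoDBQueryAttributes is an illustrative sketch (the function name
+// and attribute combination are hypothetical) showing how the DynamoDB
+// constructors above can be combined into a span attribute set; the values
+// reuse the documented examples.
+func exampleDynamoDBQueryAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Keys of the RequestItems object field.
+		AWSDynamoDBTableNames("Users"),
+		// Value of the IndexName request parameter.
+		AWSDynamoDBIndexName("name_to_group"),
+		// Value of the ConsistentRead request parameter.
+		AWSDynamoDBConsistentRead(true),
+		// Value of the Limit request parameter.
+		AWSDynamoDBLimit(10),
+	}
+}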
+
+// Attributes for AWS Elastic Container Service (ECS).
+const (
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID
+ // MUST be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
+ // populated.)
+ // Stability: experimental
+ // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
+ // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
+ // running [ECS
+ // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
+ // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family
+ // name of the [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+ // used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the
+// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
+// ECS task. The ID MUST be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS
+// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// used to create the ECS task.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
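+
+// exampleECSTaskAttributes is an illustrative sketch (the function name and
+// attribute combination are hypothetical) combining the ECS constructors and
+// the launch-type enum above; the values reuse the documented examples.
+func exampleECSTaskAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+		AWSECSTaskFamily("opentelemetry-family"),
+		AWSECSTaskRevision("8"),
+		// The launch type is an Enum attribute, so one of the predefined
+		// values is used instead of a constructor.
+		AWSECSLaunchtypeFargate,
+	}
+}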
+
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
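+
+// exampleLogGroupAttributes is an illustrative sketch (the function name is
+// hypothetical) showing the log group and log stream constructors above
+// applied to the documented example values.
+func exampleLogGroupAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSLogGroupNames("/aws/lambda/my-function"),
+		AWSLogGroupARNs("arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"),
+		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+	}
+}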
+
+// Attributes for AWS Lambda.
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+	// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
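+
+// exampleS3UploadPartAttributes is an illustrative sketch (the function name
+// is hypothetical) showing the S3 constructors above describing an
+// upload-part call; the values reuse the documented examples.
+func exampleS3UploadPartAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		AWSS3Bucket("some-bucket-name"),
+		AWSS3Key("someFile.yml"),
+		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+		AWSS3PartNumber(3456),
+	}
+}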
+
+// The web browser attributes
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
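+
+// exampleBrowserAttributes is an illustrative sketch (the function name is
+// hypothetical) showing the browser constructors above populated with the
+// documented example values, as they might be reported from the UA client
+// hints and Navigator APIs.
+func exampleBrowserAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+		BrowserLanguage("en-US"),
+		BrowserMobile(false),
+		BrowserPlatform("macOS"),
+	}
+}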
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
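+
+// exampleClientAttributes is an illustrative sketch (the function name is
+// hypothetical) showing the client address and port constructors above
+// describing the initiating side of a connection, using the documented
+// example values.
+func exampleClientAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ClientAddress("client.example.com"),
+		ClientPort(65123),
+	}
+}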
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
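+
+// exampleCloudResourceAttributes is an illustrative sketch (the function name
+// and attribute combination are hypothetical) combining the cloud
+// constructors and the provider/platform enums above to describe an AWS EC2
+// resource; the values reuse the documented examples.
+func exampleCloudResourceAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Provider and platform are Enum attributes, so the predefined values
+		// are used; the platform prefix matches the provider.
+		CloudProviderAWS,
+		CloudPlatformAWSEC2,
+		CloudRegion("us-east-1"),
+		CloudAvailabilityZone("us-east-1c"),
+		CloudAccountID("111111111111"),
+	}
+}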
+
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// that contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
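+
+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch of attaching the CloudEvents attributes above to a
+// span, assuming `span` is an OpenTelemetry trace.Span; the event id is a
+// placeholder and the other values reuse the documented examples.
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("example-event-id"), // placeholder id
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.github.pull_request.opened"),
+//		CloudeventsEventSubject("mynewfile.jpg"),
+//	)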
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at
+ // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
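+
+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch of annotating a span with the code-location attributes
+// above, assuming `span` is an OpenTelemetry trace.Span; the values reuse the
+// documented examples.
+//
+//	span.SetAttributes(
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFunction("serveRequest"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//		CodeColumn(16),
+//	)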
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID":
+ // "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name, for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
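+
+// Illustrative usage (not part of the generated conventions): container
+// attributes usually describe the entity producing telemetry, so a minimal,
+// hypothetical sketch would attach them to an SDK resource, assuming `ctx` is
+// a context.Context and `resource` is go.opentelemetry.io/otel/sdk/resource;
+// the values reuse the documented examples.
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			ContainerName("opentelemetry-autoconf"),
+//			ContainerID("a3bf90e006b2"),
+//			ContainerImageName("gcr.io/opentelemetry/operator"),
+//			ContainerImageTags("v1.27.1"),
+//			ContainerRuntime("docker"),
+//		),
+//	)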
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document
+ // what `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationNameKey is the attribute Key conforming to the
+ // "db.operation.name" semantic conventions. It represents the name of the
+ // operation or command being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: It is RECOMMENDED to capture the value as provided by the
+ // application without attempting to do any case normalization.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
+ // "WuValue"'
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents the database management system (DBMS) product
+ // as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual DBMS may differ from the one identified by the client.
+ // For example, when using PostgreSQL client libraries to connect to a
+ // CockroachDB, the `db.system` is set to `postgresql` based on the
+ // instrumentation's best knowledge.
+ DBSystemKey = attribute.Key("db.system")
+)
+
+var (
+ // idle
+ DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
+ // used
+ DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
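+
+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch of the attributes a database client span might carry,
+// assuming `span` is an OpenTelemetry trace.Span; the values reuse the
+// documented examples.
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBNamespace("customers"),
+//		DBCollectionName("public.users"),
+//		DBOperationName("SELECT"),
+//		DBQueryText("SELECT * FROM wuser_table where username = ?"),
+//	)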
+
+// This group defines attributes for Cassandra.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+ // consumed for that operation.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
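+
+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch combining the Cosmos DB constructors and enum members
+// above on a client span, assuming `span` is an OpenTelemetry trace.Span; the
+// values reuse the documented examples.
+//
+//	span.SetAttributes(
+//		DBSystemCosmosDB,
+//		DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
+//		DBCosmosDBConnectionModeGateway,
+//		DBCosmosDBOperationTypeQuery,
+//		DBCosmosDBRequestCharge(46.18),
+//		DBCosmosDBStatusCode(200),
+//	)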
+
+// This group defines attributes for Elasticsearch.
+const (
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represents an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the `device.app.lifecycle`
+ // event definition, including `android.state` as a payload field, instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report a DNS query.
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the
+ // "dns.question.name" semantic conventions. It represents the name being
+ // queried.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.example.com', 'opentelemetry.io'
+ // Note: If the name field contains non-printable characters (below 32 or
+ // above 126), those characters should be represented as escaped base 10
+ // integers (\DDD). Backslashes and quotes should be escaped. Tabs,
+ // carriage returns, and line feeds should be converted to \t, \r, and \n
+ // respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Attributes for operations with an authenticated and/or authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be
+ // used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be set to true if the
+ // exception event is recorded at a point where it is known that the
+ // exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
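+
+// Illustrative usage sketch (not part of the generated conventions): record an
+// exception as a span event using the helpers above, assuming span is an
+// active trace.Span from go.opentelemetry.io/otel/trace and err is the error
+// being recorded.
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("*net.OpError"),
+//		ExceptionMessage(err.Error()),
+//		ExceptionEscaped(true),
+//	))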
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes the type of
+ // the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, which will potentially be reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function). This means
+ // that a span attribute MUST be used, as an Azure function app can host
+ // multiple functions that would usually share a TracerProvider (see also
+ // the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
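+
+// Illustrative usage sketch (not part of the generated conventions): describe
+// a serverless invocation on a span, assuming span is an active trace.Span
+// from go.opentelemetry.io/otel/trace; the values shown are placeholders.
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSColdstart(true),
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//		FaaSInvokedName("my-function"),
+//		FaaSInvokedProviderAWS,
+//		FaaSInvokedRegion("eu-central-1"),
+//	)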
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+ // identifier for a value. If one is unavailable, a stringified version of
+ // the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself.
+ // This can provide additional context for understanding the meaning behind
+ // a value. For example, the variant `red` may be used for the value
+ // `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
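+
+// Illustrative usage sketch (not part of the generated conventions): report a
+// feature flag evaluation as a span event, assuming span is an active
+// trace.Span from go.opentelemetry.io/otel/trace.
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))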
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FileSizeKey = attribute.Key("file.size")
+)
+
+// FileDirectory returns an attribute KeyValue conforming to the
+// "file.directory" semantic conventions. It represents the directory where the
+// file is located. It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the
+// "file.extension" semantic conventions. It represents the file extension,
+// excluding the leading dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name"
+// semantic conventions. It represents the name of the file including the
+// extension, without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path"
+// semantic conventions. It represents the full path to the file, including the
+// file name. It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size"
+// semantic conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
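+
+// Illustrative usage sketch (not part of the generated conventions): describe
+// a file touched by an operation, assuming span is an active trace.Span from
+// go.opentelemetry.io/otel/trace; the values shown are placeholders.
+//
+//	span.SetAttributes(
+//		FileDirectory("/home/alice"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(1024),
+//	)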
+
+// Attributes for Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
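+
+// Illustrative usage sketch (not part of the generated conventions): these
+// helpers are typically used as resource attributes, e.g. with
+// resource.NewSchemaless from go.opentelemetry.io/otel/sdk/resource; the
+// values shown are placeholders.
+//
+//	res := resource.NewSchemaless(
+//		GCPCloudRunJobExecution("sample-job-mdw84"),
+//		GCPCloudRunJobTaskIndex(0),
+//	)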
+
+// Attributes for Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
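+
+// Illustrative usage sketch (not part of the generated conventions): like the
+// Cloud Run helpers above, the GCE helpers usually populate resource
+// attributes; resource.NewSchemaless is from
+// go.opentelemetry.io/otel/sdk/resource and the values are placeholders.
+//
+//	res := resource.NewSchemaless(
+//		GCPGceInstanceName("instance-1"),
+//		GCPGceInstanceHostname("my-host1234.example.com"),
+//	)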
+
+// The attributes used to describe telemetry in the context of LLM (Large
+// Language Models) requests and responses.
+const (
+ // GenAiCompletionKey is the attribute Key conforming to the
+ // "gen_ai.completion" semantic conventions. It represents the full
+ // response received from the LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
+ // Paris.'}]"
+ // Note: It's RECOMMENDED to format completions as a JSON string matching
+ // [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation)
+ GenAiCompletionKey = attribute.Key("gen_ai.completion")
+
+ // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
+ // semantic conventions. It represents the full prompt sent to an LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'user', 'content': 'What is the capital of
+ // France?'}]"
+ // Note: It's RECOMMENDED to format prompts as a JSON string matching [OpenAI
+ // messages
+ // format](https://platform.openai.com/docs/guides/text-generation)
+ GenAiPromptKey = attribute.Key("gen_ai.prompt")
+
+ // GenAiRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the
+ // maximum number of tokens the LLM generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAiRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of
+ // the LLM a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4'
+ GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAiRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0.0
+ GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAiRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p
+ // sampling setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0
+ GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to
+ // each generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'stop'
+ GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAiResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'chatcmpl-123'
+ GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAiResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of
+ // the LLM a response was generated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4-0613'
+ GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as
+ // identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'openai'
+ // Note: The actual GenAI product may differ from the one identified by the
+ // client. For example, when using OpenAI client libraries to communicate
+ // with Mistral, the `gen_ai.system` is set to `openai` based on the
+ // instrumentation's best knowledge.
+ GenAiSystemKey = attribute.Key("gen_ai.system")
+
+ // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 180
+ GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+ // GenAiUsagePromptTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM prompt.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+ // OpenAI
+ GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+ return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+ return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+ return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+ return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+ return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
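+
+// Illustrative usage sketch (not part of the generated conventions): annotate
+// an LLM request span with the helpers above, assuming span is an active
+// trace.Span from go.opentelemetry.io/otel/trace; the values are placeholders.
+//
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiRequestMaxTokens(100),
+//		GenAiRequestTemperature(0.0),
+//		GenAiUsagePromptTokens(100),
+//		GenAiUsageCompletionTokens(180),
+//	)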
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
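+
+// Illustrative usage sketch (not part of the generated conventions): describe
+// a GraphQL execution on a span, assuming span is an active trace.Span from
+// go.opentelemetry.io/otel/trace.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)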
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
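+
+// Illustrative usage sketch (not part of the generated conventions): the
+// Heroku helpers above are resource attributes; resource.NewSchemaless is from
+// go.opentelemetry.io/otel/sdk/resource and the values are placeholders.
+//
+//	res := resource.NewSchemaless(
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	)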
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
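+
+// Illustrative usage sketch (not part of the generated conventions): host
+// attributes usually describe the resource emitting telemetry;
+// resource.NewSchemaless is from go.opentelemetry.io/otel/sdk/resource and the
+// values are placeholders.
+//
+//	res := resource.NewSchemaless(
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostType("n1-standard-1"),
+//		HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+//	)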
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known method, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
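+
+// Illustrative usage sketch (not part of the generated conventions): per the
+// notes on `http.request.method`, an instrumentation that encounters a method
+// outside the known set would record `_OTHER` together with the original
+// value. This assumes `span` is a trace.Span obtained from
+// go.opentelemetry.io/otel/trace:
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodOther,
+//		HTTPRequestMethodOriginal("ACL"),
+//	)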
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
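+
+// Illustrative usage sketch (not part of the generated conventions): an HTTP
+// server instrumentation might combine these attributes on a server span as
+// shown below; the route and status values mirror the examples above, and
+// `span` is assumed to be a trace.Span:
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID?"),
+//		HTTPResponseStatusCode(200),
+//	)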
+
+// Java Virtual Machine related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents the whether the
+ // thread is daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents the whether the
+// thread is daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
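+
+// Illustrative usage sketch (not part of the generated conventions): JVM
+// runtime metrics typically attach these attributes as a set, for example
+// when reporting heap usage per memory pool. The pool name below is just the
+// example value from the docs above:
+//
+//	attrs := attribute.NewSet(
+//		JvmMemoryTypeHeap,
+//		JvmMemoryPoolName("G1 Eden space"),
+//	)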
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // Container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// Container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
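+
+// Illustrative usage sketch (not part of the generated conventions): these
+// Kubernetes attributes are normally attached to a resource rather than to
+// individual spans. Assuming go.opentelemetry.io/otel/sdk/resource is
+// imported and this package exposes a SchemaURL constant, a detector could
+// build:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		K8SClusterName("opentelemetry-cluster"),
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//	)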
+
+// Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
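+
+// Illustrative usage sketch (not part of the generated conventions): a
+// file-tailing collector could describe a log record it read from a file
+// capturing stdout with a plain attribute slice; the values mirror the
+// examples above:
+//
+//	attrs := []attribute.KeyValue{
+//		LogIostreamStdout,
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
+//	}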
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker doesn't have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to
+ // the "messaging.destination.partition.id" semantic conventions. It
+ // represents the identifier of the partition messages are sent to or
+ // received from, unique within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1'
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or the uncompressed body
+ // size. If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or the uncompressed size.
+ // If both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack', 'nack', 'send'
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents the messaging
+ // system as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate
+ // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+ // the instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are delivered to or processed by a consumer
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming
+// to the "messaging.destination.partition.id" semantic conventions. It
+// represents the identifier of the partition messages are sent to or received
+// from, unique within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
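+
+// Illustrative usage sketch (not part of the generated conventions): a
+// producer-side instrumentation publishing to Kafka might combine the generic
+// messaging attributes like so, assuming `span` is a trace.Span:
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationTypePublish,
+//		MessagingOperationName("send"),
+//		MessagingDestinationName("MyTopic"),
+//		MessagingMessageBodySize(1439),
+//	)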
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
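+
+// Illustrative usage sketch (not part of the generated conventions): on the
+// consumer side, the Kafka-specific attributes above are typically recorded
+// alongside the generic messaging ones, e.g.:
+//
+//	span.SetAttributes(
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaMessageKey("myKey"),
+//		MessagingKafkaMessageOffset(42),
+//	)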
+
+// This group describes attributes specific to RabbitMQ.
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+ // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+ // It represents the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 123
+ MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group; it is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
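+
+// Example: a hypothetical usage sketch (not part of the generated
+// conventions) showing how the RocketMQ helpers above can be attached to a
+// span; `span` is assumed to be a go.opentelemetry.io/otel/trace.Span and
+// the values are illustrative only.
+//
+//	span.SetAttributes(
+//		MessagingRocketmqNamespace("myNamespace"),
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqMessageTag("tagA"),
+//		MessagingRocketmqMessageTypeFifo,
+//	)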
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+ // It represents the ack deadline in seconds set for the modify ack
+ // deadline request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+ // represents the ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack_id'
+ MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+ // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+ // semantic conventions. It represents the delivery attempt for a given
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
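+
+// Example: a hypothetical usage sketch for the GCP Pub/Sub helpers above;
+// `span` is assumed to be a go.opentelemetry.io/otel/trace.Span obtained
+// elsewhere, and the values are illustrative only.
+//
+//	span.SetAttributes(
+//		MessagingGCPPubsubMessageAckID("ack_id"),
+//		MessagingGCPPubsubMessageOrderingKey("ordering_key"),
+//		MessagingGCPPubsubMessageDeliveryAttempt(2),
+//	)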
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It represents the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
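+
+// Example: a hypothetical usage sketch combining the Service Bus helpers
+// above with one of the disposition-status enum members; `span` is an
+// assumed trace.Span and the values are illustrative only.
+//
+//	span.SetAttributes(
+//		MessagingServicebusDestinationSubscriptionName("mySubscription"),
+//		MessagingServicebusMessageDeliveryCount(2),
+//		MessagingServicebusDispositionStatusComplete,
+//	)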
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+ // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+ // the "messaging.eventhubs.consumer.group" semantic conventions. It
+ // represents the name of the consumer group the event consumer is
+ // associated with.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'indexer'
+ MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+ // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+ // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+ // It represents the UTC epoch seconds at which the message has been
+ // accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+ return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
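+
+// Example: a hypothetical usage sketch for the Event Hubs helpers above;
+// `span` is an assumed trace.Span and the values are illustrative only.
+//
+//	span.SetAttributes(
+//		MessagingEventhubsConsumerGroup("indexer"),
+//		MessagingEventhubsMessageEnqueuedTime(1701393730),
+//	)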
+
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // actual version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.1', '2'
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
+ // SHOULD be set to the negotiated version. If the actual protocol version
+ // is not known, this attribute SHOULD NOT be set.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
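+
+// Example: a hypothetical usage sketch describing a TCP connection with the
+// network helpers and enum members above; `span` is an assumed
+// go.opentelemetry.io/otel/trace.Span and the values are illustrative only.
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIpv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	)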
+
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest
+// by which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
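+
+// Example: a hypothetical sketch of the OS attributes as they might be
+// declared for a resource; the values are illustrative, and the slice would
+// typically be passed to an SDK resource constructor, which is outside the
+// scope of this package.
+//
+//	osAttrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSDescription("Ubuntu 18.04.1 LTS"),
+//	}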
+
+// Attributes reserved for OpenTelemetry
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
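+
+// Example: a hypothetical sketch of the status attributes as a non-OTLP
+// exporter might emit them for a failed span; in instrumentation code the
+// span status is normally set through the trace API rather than via these
+// attributes.
+//
+//	statusAttrs := []attribute.KeyValue{
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	}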
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otecol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string. On Windows, can
+ // be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were
+ // voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and
+ // time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the
+ // "process.exit.code" semantic conventions. It represents the exit code of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the
+ // "process.exit.time" semantic conventions. It represents the date and
+ // time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID
+ // of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether
+ // the process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type
+ // of page fault for this data point. Type `major` is for major/hard page
+ // faults, and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user
+ // ID (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the
+ // username of the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved
+ // user ID (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the
+ // username of the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID
+ // of the process's session leader. This is also the session ID (SID) of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessUserIDKey is the attribute Key conforming to the
+ // "process.user.id" semantic conventions. It represents the effective user
+ // ID (EUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the
+ // "process.user.name" semantic conventions. It represents the username of
+ // the effective user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+ // semantic conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily
+ // unique across all processes on the host but it is unique within the
+ // process namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+ // voluntary
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+ // major
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
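+
+// Example: a hypothetical sketch of self-describing process attributes built
+// from the standard library; the helper names come from this package, while
+// the use of os and path/filepath is an illustrative assumption.
+//
+//	procAttrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessExecutableName(filepath.Base(os.Args[0])),
+//		ProcessCommandArgs(os.Args...),
+//	}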
+
+// Attributes for process CPU
+const (
+ // ProcessCPUStateKey is the attribute Key conforming to the
+ // "process.cpu.state" semantic conventions. It represents the CPU state of
+ // the process.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+ // system
+ ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+ // user
+ ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+ // wait
+ ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`, one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents whether this is a
+ // received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called; it must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // sent
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to be
+ // an int, string, `null` or missing (for notifications), the value is expected
+ // to be cast to a string for simplicity. Use an empty string in case of a
+ // `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+ // RPCMessageID returns an attribute KeyValue conforming to the
+ // "rpc.message.id" semantic conventions. It represents the message ID, which
+ // MUST be calculated as two different counters starting from `1`, one for
+ // sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+ // called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
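+
+ // exampleRPCCallAttributes is a minimal, hypothetical sketch (the helper is
+ // illustrative only and not defined by the semantic conventions) showing how
+ // the RPC attribute constructors and enum values above might be combined into
+ // one attribute list for a gRPC call, using the documented example values.
+ func exampleRPCCallAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ RPCSystemGRPC, // rpc.system: identifies the remoting system as gRPC
+ RPCService("myservice.EchoService"), // rpc.service: full logical service name
+ RPCMethod("exampleMethod"), // rpc.method: logical method, matching the span name
+ RPCGRPCStatusCodeOk, // rpc.grpc.status_code: 0 (OK)
+ }
+ }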
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
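+
+ // exampleServerAttributes is a hypothetical, illustrative helper (not part of
+ // the conventions themselves) showing how ServerAddress and ServerPort might
+ // be used together to describe the server side of a connection, using the
+ // documented example values.
+ func exampleServerAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ ServerAddress("example.com"), // server.address: domain name, IP, or Unix socket
+ ServerPort(443), // server.port: server port number
+ }
+ }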
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time
+ // (e.g. instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 [RFC
+ // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+ // inherent unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace should be treated as confidential,
+ // being the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+ // creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
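+
+ // exampleServiceResourceAttributes is a hypothetical sketch (illustrative
+ // only) of how the service.* constructors above might describe one instance
+ // of a horizontally scaled service; the values are the documented examples.
+ func exampleServiceResourceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ ServiceName("shoppingcart"), // service.name: logical service name
+ ServiceNamespace("Shop"), // service.namespace: groups related services
+ ServiceVersion("2.0.0"), // service.version: version string of the service
+ ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"), // unique per instance
+ }
+ }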
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
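+
+ // exampleSessionAttributes is a hypothetical, illustrative helper showing how
+ // a new session can be linked to the one it replaced by emitting both
+ // session.id and session.previous_id; the parameters are assumed placeholders.
+ func exampleSessionAttributes(current, previous string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+ SessionID(current), // session.id: unique id of the current session
+ SessionPreviousID(previous), // session.previous_id: id of the expired session, when known
+ }
+ }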
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
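+
+ // exampleSourceAttributes is a hypothetical sketch (illustrative only) of
+ // describing the sender of a network exchange when no client/server
+ // relationship is known, using the documented example values.
+ func exampleSourceAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ SourceAddress("source.example.com"), // source.address: domain, IP, or Unix socket
+ SourcePort(3389), // source.port: source port number
+ }
+ }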
+
+// Describes System attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
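+
+ // exampleSystemCPUAttributes is a hypothetical, illustrative helper showing
+ // how a per-CPU measurement might be tagged with the logical CPU number and
+ // CPU state defined above; the values follow the documented examples.
+ func exampleSystemCPUAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ SystemCPULogicalNumber(1), // system.cpu.logical_number: logical CPU [0..n-1]
+ SystemCPUStateUser, // system.cpu.state: time spent in user mode
+ }
+ }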
+
+// Describes System Memory attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
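+
+ // exampleFilesystemAttributes is a hypothetical sketch (illustrative only) of
+ // tagging a filesystem usage measurement with the device, mode, mountpoint,
+ // state, and type attributes defined above; the device path is an assumed
+ // value, the rest come from the documented examples.
+ func exampleFilesystemAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ SystemDevice("/dev/sda1"), // system.device: device identifier (assumed value)
+ SystemFilesystemMode("rw"), // system.filesystem.mode: mount mode
+ SystemFilesystemMountpoint("/mnt/data"), // system.filesystem.mountpoint: mount path
+ SystemFilesystemStateUsed, // system.filesystem.state: used space
+ SystemFilesystemTypeExt4, // system.filesystem.type: ext4
+ }
+ }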
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
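+
+ // exampleTelemetrySDKAttributes is a hypothetical, illustrative helper
+ // showing the three required telemetry.sdk.* attributes an SDK would report,
+ // using the documented example values.
+ func exampleTelemetrySDKAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ TelemetrySDKLanguageGo, // telemetry.sdk.language: go
+ TelemetrySDKName("opentelemetry"), // telemetry.sdk.name: reserved for the official SDK
+ TelemetrySDKVersion("1.2.3"), // telemetry.sdk.version: SDK version string
+ }
+ }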
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
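+
+ // exampleThreadAttributes is a hypothetical sketch (illustrative only) of
+ // recording the managed thread that started a span, using the documented
+ // example values.
+ func exampleThreadAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ ThreadID(42), // thread.id: managed thread ID (not the OS thread ID)
+ ThreadName("main"), // thread.name: current thread name
+ }
+ }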
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/time indicating when the client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the also
+ // called an SNI, this tells the server which hostname to which the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually exclusive with `server.certificate`
+// since that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of the
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing a URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+ // '[1080:0:0:0:8:800:200C:417A]'
+ // Note: In some cases a URL may refer to an IP and/or port directly,
+ // without a domain name. In this case, the IP address would go to the
+ // domain field. If the URL contains a [literal IPv6
+ // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+ // `[` and `]`, the `[` and `]` characters should also be captured in the
+ // domain field.
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from
+ // the `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: The file extension is only set if it exists, as not every URL has
+ // a file extension. When the file name has multiple extensions
+ // `example.tar.gz`, only the last one should be captured `gz`, not
+ // `tar.gz`.
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case username and
+ // password SHOULD be redacted and attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed). Sensitive content provided in `url.full` SHOULD be
+ // scrubbed when instrumentations can identify it.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original"
+ // semantic conventions. It represents the unmodified original URL as seen
+ // in the event source.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // 'search?q=OpenTelemetry'
+ // Note: In network monitoring, the observed URL may be a full URL, whereas
+ // in access logs, the URL is often just represented as a path. This field
+ // is meant to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the
+ // same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, which is the
+ // last part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from the original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from the original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string. In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from the original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from the original. Usually refers to the browser's version
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
new file mode 100644
index 000000000..d031bbea7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.26.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
new file mode 100644
index 000000000..bfaee0d56
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
new file mode 100644
index 000000000..fcdb9f485
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
@@ -0,0 +1,1307 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+
+ // ContainerCPUTime is the metric conforming to the "container.cpu.time"
+ // semantic conventions. It represents the total CPU time consumed.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ContainerCPUTimeName = "container.cpu.time"
+ ContainerCPUTimeUnit = "s"
+ ContainerCPUTimeDescription = "Total CPU time consumed"
+
+ // ContainerMemoryUsage is the metric conforming to the
+ // "container.memory.usage" semantic conventions. It represents the memory
+ // usage of the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerMemoryUsageName = "container.memory.usage"
+ ContainerMemoryUsageUnit = "By"
+ ContainerMemoryUsageDescription = "Memory usage of the container."
+
+ // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
+ // conventions. It represents the disk bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerDiskIoName = "container.disk.io"
+ ContainerDiskIoUnit = "By"
+ ContainerDiskIoDescription = "Disk bytes for the container."
+
+ // ContainerNetworkIo is the metric conforming to the "container.network.io"
+ // semantic conventions. It represents the network bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerNetworkIoName = "container.network.io"
+ ContainerNetworkIoUnit = "By"
+ ContainerNetworkIoDescription = "Network bytes for the container."
+
+ // DBClientOperationDuration is the metric conforming to the
+ // "db.client.operation.duration" semantic conventions. It represents the
+ // duration of database client operations.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientOperationDurationName = "db.client.operation.duration"
+ DBClientOperationDurationUnit = "s"
+ DBClientOperationDurationDescription = "Duration of database client operations."
+
+ // DBClientConnectionCount is the metric conforming to the
+ // "db.client.connection.count" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionCountName = "db.client.connection.count"
+ DBClientConnectionCountUnit = "{connection}"
+ DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionIdleMax is the metric conforming to the
+ // "db.client.connection.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
+ DBClientConnectionIdleMaxUnit = "{connection}"
+ DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionIdleMin is the metric conforming to the
+ // "db.client.connection.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMinName = "db.client.connection.idle.min"
+ DBClientConnectionIdleMinUnit = "{connection}"
+ DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionMax is the metric conforming to the
+ // "db.client.connection.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionMaxName = "db.client.connection.max"
+ DBClientConnectionMaxUnit = "{connection}"
+ DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionPendingRequests is the metric conforming to the
+ // "db.client.connection.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
+ DBClientConnectionPendingRequestsUnit = "{request}"
+ DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionTimeouts is the metric conforming to the
+ // "db.client.connection.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the deprecated, use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Stable
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Stable
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // duration of a publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // duration of a receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It represents the
+ // duration of a process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It represents the
+ // number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions. It represents the difference
+ // in process.cpu.time since the last measurement, divided by the elapsed time
+ // and number of CPUs available to the process.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ ProcessCPUUtilizationName = "process.cpu.utilization"
+ ProcessCPUUtilizationUnit = "1"
+ ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
+
+ // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
+ // semantic conventions. It represents the amount of physical memory in use.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryUsageName = "process.memory.usage"
+ ProcessMemoryUsageUnit = "By"
+ ProcessMemoryUsageDescription = "The amount of physical memory in use."
+
+ // ProcessMemoryVirtual is the metric conforming to the
+ // "process.memory.virtual" semantic conventions. It represents the amount of
+ // committed virtual memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryVirtualName = "process.memory.virtual"
+ ProcessMemoryVirtualUnit = "By"
+ ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
+
+ // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
+ // conventions. It represents the disk bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessDiskIoName = "process.disk.io"
+ ProcessDiskIoUnit = "By"
+ ProcessDiskIoDescription = "Disk bytes transferred."
+
+ // ProcessNetworkIo is the metric conforming to the "process.network.io"
+ // semantic conventions. It represents the network bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessNetworkIoName = "process.network.io"
+ ProcessNetworkIoUnit = "By"
+ ProcessNetworkIoDescription = "Network bytes transferred."
+
+ // ProcessThreadCount is the metric conforming to the "process.thread.count"
+ // semantic conventions. It represents the process thread count.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Experimental
+ ProcessThreadCountName = "process.thread.count"
+ ProcessThreadCountUnit = "{thread}"
+ ProcessThreadCountDescription = "Process threads count."
+
+ // ProcessOpenFileDescriptorCount is the metric conforming to the
+ // "process.open_file_descriptor.count" semantic conventions. It represents the
+ // number of file descriptors in use by the process.
+ // Instrument: updowncounter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
+ ProcessOpenFileDescriptorCountUnit = "{count}"
+ ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
+
+ // ProcessContextSwitches is the metric conforming to the
+ // "process.context_switches" semantic conventions. It represents the number of
+ // times the process has been context switched.
+ // Instrument: counter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessContextSwitchesName = "process.context_switches"
+ ProcessContextSwitchesUnit = "{count}"
+ ProcessContextSwitchesDescription = "Number of times the process has been context switched."
+
+ // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
+ // semantic conventions. It represents the number of page faults the process
+ // has made.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It represents the duration of inbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It represents the duration of outbound
+ // RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It represents the current frequency of the
+ // CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It represents memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time the disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessCount is the metric conforming to the "system.process.count"
+ // semantic conventions. It represents the total number of processes in each
+ // state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCountName = "system.process.count"
+ SystemProcessCountUnit = "{process}"
+ SystemProcessCountDescription = "Total number of processes in each state"
+
+ // SystemProcessCreated is the metric conforming to the
+ // "system.process.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCreatedName = "system.process.created"
+ SystemProcessCreatedUnit = "{process}"
+ SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
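
The generated Name/Unit/Description constants above are meant to be passed straight to a meter when creating instruments, so instrument metadata stays aligned with the semantic conventions. A minimal sketch, assuming the go.opentelemetry.io/otel metric API and a configured global MeterProvider; the function and scope name are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordConnectionDuration creates (for brevity, on every call) a histogram whose
// name, unit, and description come from the generated semconv constants, then
// records one observation in seconds.
func recordConnectionDuration(ctx context.Context, seconds float64) error {
	meter := otel.Meter("example/httpclient") // illustrative scope name

	hist, err := meter.Float64Histogram(
		semconv.HTTPClientConnectionDurationName,
		metric.WithUnit(semconv.HTTPClientConnectionDurationUnit),
		metric.WithDescription(semconv.HTTPClientConnectionDurationDescription),
	)
	if err != nil {
		return err
	}

	hist.Record(ctx, seconds)
	return nil
}

In real instrumentation the instrument would be created once and reused; the per-call creation here only keeps the sketch short.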
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
new file mode 100644
index 000000000..4c87c7adc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
index caf7249de..6836c6547 100644
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 000000000..58ccaba69
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 3aadc66cf..9c0b720a4 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -224,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{}
// WithAttributes adds the attributes related to a span life-cycle event.
// These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
+// option is provided to a Span's start event. Otherwise, these
// attributes provide additional information about the event being recorded
// (e.g. error, state change, processing progress, system event).
//
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 76f9a083c..8c45a7107 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -33,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
}
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
// as a remote SpanContext and as the current Span. The Span implementation
// that wraps rsc is non-recording and performs no operations other than to
// return rsc as the SpanContext from the SpanContext method.
@@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte
// performs no operations is returned.
func SpanFromContext(ctx context.Context) Span {
if ctx == nil {
- return noopSpan{}
+ return noopSpanInstance
}
if span, ok := ctx.Value(currentSpanKey).(Span); ok {
return span
}
- return noopSpan{}
+ return noopSpanInstance
}
// SpanContextFromContext returns the current Span's SpanContext.
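
With the change above, SpanFromContext hands back a shared no-op Span instance instead of allocating a fresh noopSpan{}, but the contract is unchanged: callers always get a usable Span. A small illustration under that contract; the helper is hypothetical, not part of the vendored code:

package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// addNote is safe to call whether or not a span was started upstream: when no
// span is present in ctx, SpanFromContext returns a no-op Span and AddEvent
// does nothing.
func addNote(ctx context.Context, msg string) {
	trace.SpanFromContext(ctx).AddEvent(msg)
}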
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index 440f3d756..cdbf41d6d 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
/*
Package trace provides an implementation of the tracing part of the
@@ -107,7 +96,7 @@ can embed the API interface directly.
This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency.
Finally, an author can embed another implementation in theirs. The embedded
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
new file mode 100644
index 000000000..7754a239e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
@@ -0,0 +1,3 @@
+# Trace Embedded
+
+[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
index 898db5a75..3e359a00b 100644
--- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
// Package embedded provides interfaces embedded within the [OpenTelemetry
// trace API].
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
index 88fcb8161..c00221e7b 100644
--- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index c125491ca..ca20e9997 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
span := SpanFromContext(ctx)
if _, ok := span.(nonRecordingSpan); !ok {
// span is likely already a noopSpan, but let's be sure
- span = noopSpan{}
+ span = noopSpanInstance
}
return ContextWithSpan(ctx, span), span
}
@@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
// noopSpan is an implementation of Span that performs no operations.
type noopSpan struct{ embedded.Span }
-var _ Span = noopSpan{}
+var noopSpanInstance Span = noopSpan{}
// SpanContext returns an empty span context.
func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
@@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {}
// AddEvent does nothing.
func (noopSpan) AddEvent(string, ...EventOption) {}
+// AddLink does nothing.
+func (noopSpan) AddLink(Link) {}
+
// SetName does nothing.
func (noopSpan) SetName(string) {}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 000000000..cd382c82a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 000000000..64a4f1b36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ trace.TracerProvider = TracerProvider{}
+ _ trace.Tracer = Tracer{}
+ _ trace.Span = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+ return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+ return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+ span := trace.SpanFromContext(ctx)
+
+ // If the parent context contains a non-zero span context, that span
+ // context needs to be returned as a non-recording span
+ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+ var zeroSC trace.SpanContext
+ if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+ if !span.IsRecording() {
+ // If the span is not recording return it directly.
+ return ctx, span
+ }
+ // Otherwise, wrap the span context in a non-recording span and return it.
+ span = Span{sc: sc}
+ } else {
+ // No parent, return a No-Op span with an empty span context.
+ span = noopSpanInstance
+ }
+ return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+ embedded.Span
+
+ sc trace.SpanContext
+}
+
+// SpanContext returns an empty span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
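
As the package comment notes, wiring this provider in effectively disables tracing while keeping instrumented code paths valid. A minimal sketch, assuming the global otel API (otel.SetTracerProvider):

package example

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace/noop"
)

// disableTracing registers the no-op TracerProvider globally, so every Tracer
// obtained via otel.Tracer afterwards produces only non-recording spans.
func disableTracing() {
	otel.SetTracerProvider(noop.NewTracerProvider())
}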
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 000000000..ef85cb70c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
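
In practice, instrumentation code asks a TracerProvider for a Tracer named after the instrumenting package, as the doc comment recommends. A short sketch; the package path and version below are illustrative:

package example

import "go.opentelemetry.io/otel/trace"

// newTracer scopes the Tracer to the instrumentation library and records its
// version so different codebases can be told apart.
func newTracer(tp trace.TracerProvider) trace.Tracer {
	return tp.Tracer(
		"example.com/mylib/otelhttpclient", // illustrative instrumentation package path
		trace.WithInstrumentationVersion("1.2.3"),
	)
}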
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 000000000..d3aa476ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
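
Putting the Span surface above together: a hedged sketch of starting a server-kind span, attaching a link and attributes, recording an error, and ending it. The handle and doWork functions are hypothetical:

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context, tracer trace.Tracer, batchCtx context.Context) error {
	ctx, span := tracer.Start(ctx, "handle", trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	// Link this span to the batch that produced the work (see the Link docs above).
	span.AddLink(trace.LinkFromContext(batchCtx, attribute.String("link.reason", "batch")))
	span.SetAttributes(attribute.Int("items.count", 1))

	if err := doWork(ctx); err != nil {
		// RecordError only adds an exception event; SetStatus must be called
		// separately to mark the span as failed.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}

func doWork(context.Context) error { return nil }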
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 26a4b2260..d49adf671 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -1,28 +1,12 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
import (
"bytes"
- "context"
"encoding/hex"
"encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -337,241 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
Remote: sc.remote,
})
}
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 000000000..77952d2a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
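
As a usage note (not part of the diff), the Start contract documented above is typically exercised as below; the instrumentation name and attribute are illustrative, and the Tracer comes from the globally registered provider.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	tracer := otel.Tracer("example.com/demo") // illustrative instrumentation name

	// Start returns a child span if ctx already carries one, otherwise a root
	// span. Attributes passed here are visible to samplers at creation time.
	ctx, span := tracer.Start(ctx, "doWork",
		trace.WithAttributes(attribute.String("job", "demo")),
		trace.WithSpanKind(trace.SpanKindInternal),
	)
	defer span.End() // every started span must be ended

	_ = ctx // propagate ctx so downstream spans become children of this one
}

func main() { doWork(context.Background()) }
```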
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index db936ba5b..dc5e34cad 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/otel/trace"
@@ -271,6 +260,16 @@ func (ts TraceState) Get(key string) string {
return ""
}
+// Walk walks all key/value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
// Insert adds a new list-member defined by the key/value pair to the
// TraceState. If a list-member already exists for the given key, that
// list-member's value is updated. The new or updated list-member is always
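
The new Walk method above can be exercised as in this small sketch (not part of the diff); the tracestate header value is made up.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Parse a W3C tracestate header and walk its list-members in order.
	ts, err := trace.ParseTraceState("vendor1=opaque1,vendor2=opaque2")
	if err != nil {
		panic(err)
	}
	ts.Walk(func(key, value string) bool {
		fmt.Println(key, "=", value)
		return key != "vendor1" // returning false stops the walk early
	})
}
```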
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index dbb61a422..000000000
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
- printf "GOPATH is not defined.\n"
- exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
- printf "GOPATH ${GOPATH} is invalid \n"
- exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
- git status
- printf "\n\nError: working tree is not clean\n"
- exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
- printf "$(git log -1)"
- printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
- printf " Update go.mod for $dir\n"
- (cd "${DIR_TMP}/${dir}" && \
- # replaces is ("mod1" "mod2" …)
- replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
- # strip double quotes
- replaces=("${replaces[@]%\"}") && \
- replaces=("${replaces[@]#\"}") && \
- # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
- dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
- go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
- go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
- printf " Build $ex in ${DIR_TMP}/${ex}\n"
- (cd "${DIR_TMP}/${ex}" && \
- go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 000000000..1e87855ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+ if [ ! -f "$dir/README.md" ]; then
+ echo "couldn't find README.md for $dir"
+ missingReadme=true
+ fi
+done
+
+if [ "$missingReadme" = true ] ; then
+ echo "Error: some READMEs couldn't be found."
+ exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+ echo "Error: The released sections of the changelog file have been modified."
+ diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+ rm -rf "$TEMP_DIR"
+ false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 7b2993a1f..eb22002d8 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.24.0"
+ return "1.34.0"
}
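
A trivial runtime check (not part of the diff) that the vendored API now reports the bumped release:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	fmt.Println(otel.Version()) // prints 1.34.0 with this vendor bump
}
```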
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 1b556e678..ce4fe59b0 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -1,32 +1,15 @@
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
module-sets:
stable-v1:
- version: v1.24.0
+ version: v1.34.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- - go.opentelemetry.io/otel/example/dice
- - go.opentelemetry.io/otel/example/namedtracer
- - go.opentelemetry.io/otel/example/opencensus
- - go.opentelemetry.io/otel/example/otel-collector
- - go.opentelemetry.io/otel/example/passthrough
- - go.opentelemetry.io/otel/example/zipkin
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -40,16 +23,19 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.46.0
+ version: v0.56.0
modules:
- - go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.0.1-alpha
+ version: v0.10.0
modules:
- go.opentelemetry.io/otel/log
+ - go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.7
+ version: v0.0.12
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index de58dfb8d..ca645d9a1 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
return conf
}
-// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
index e3784123c..5b516c55f 100644
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
-// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index c7601c909..6c18ea230 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,11 +34,19 @@ import (
)
var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
- inTests bool
- disableExtendedConnectProtocol bool
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+ inTests bool
+
+ // Enabling extended CONNECT causes browsers to attempt to use
+ // WebSockets-over-HTTP/2. This results in problems when the server's websocket
+ // package doesn't support extended CONNECT.
+ //
+ // Disable extended CONNECT by default for now.
+ //
+ // Issue #71128.
+ disableExtendedConnectProtocol = true
)
func init() {
@@ -51,8 +59,8 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
- if strings.Contains(e, "http2xconnect=0") {
- disableExtendedConnectProtocol = true
+ if strings.Contains(e, "http2xconnect=1") {
+ disableExtendedConnectProtocol = false
}
}
@@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
s.v = save
}
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-// - a non-empty string starting with '/'
-// - the string '*', for OPTIONS requests.
-//
-// For now this is only used a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
-// See golang.org/issue/16847
-//
-// We used to enforce that the path also didn't start with "//", but
-// Google's GFE accepts such paths and Chrome sends them, so ignore
-// that part of the spec. See golang.org/issue/19103.
-func validPseudoPath(v string) bool {
- return (len(v) > 0 && v[0] == '/') || v == "*"
-}
-
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
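
The http2.go hunk above flips the default: extended CONNECT is now disabled unless the process opts in with GODEBUG=http2xconnect=1 (issue 71128). A minimal sketch that mirrors the init-time check rather than calling into the package; the binary name is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// Run with, for example:
//
//	GODEBUG=http2xconnect=1 ./server
func main() {
	// The real check happens in the http2 package's init; this just mirrors it.
	enabled := strings.Contains(os.Getenv("GODEBUG"), "http2xconnect=1")
	fmt.Println("extended CONNECT enabled:", enabled)
}
```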
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b55547aec..b640deb0e 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -50,6 +50,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
)
const (
@@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
- buildCommonHeaderMapsOnce()
- cv, ok := commonCanonHeader[v]
+ cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}
@@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- rp := requestParam{
- method: f.PseudoValue("method"),
- scheme: f.PseudoValue("scheme"),
- authority: f.PseudoValue("authority"),
- path: f.PseudoValue("path"),
- protocol: f.PseudoValue("protocol"),
+ rp := httpcommon.ServerRequestParam{
+ Method: f.PseudoValue("method"),
+ Scheme: f.PseudoValue("scheme"),
+ Authority: f.PseudoValue("authority"),
+ Path: f.PseudoValue("path"),
+ Protocol: f.PseudoValue("protocol"),
}
// extended connect is disabled, so we should not see :protocol
- if disableExtendedConnectProtocol && rp.protocol != "" {
+ if disableExtendedConnectProtocol && rp.Protocol != "" {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
- isConnect := rp.method == "CONNECT"
+ isConnect := rp.Method == "CONNECT"
if isConnect {
- if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
+ if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
- } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+ } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
- rp.header = make(http.Header)
+ header := make(http.Header)
+ rp.Header = header
for _, hf := range f.RegularFields() {
- rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
- if rp.authority == "" {
- rp.authority = rp.header.Get("Host")
+ if rp.Authority == "" {
+ rp.Authority = header.Get("Host")
}
- if rp.protocol != "" {
- rp.header.Set(":protocol", rp.protocol)
+ if rp.Protocol != "" {
+ header.Set(":protocol", rp.Protocol)
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
bodyOpen := !f.StreamEnded()
if bodyOpen {
- if vv, ok := rp.header["Content-Length"]; ok {
+ if vv, ok := rp.Header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
req.ContentLength = int64(cl)
} else {
@@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return rw, req, nil
}
-type requestParam struct {
- method string
- scheme, authority, path string
- protocol string
- header http.Header
-}
-
-func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
- if rp.scheme == "https" {
+ if rp.Scheme == "https" {
tlsState = sc.tlsState
}
- needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
- if needsContinue {
- rp.header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := rp.header["Cookie"]; len(cookies) > 1 {
- rp.header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer http.Header
- for _, v := range rp.header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(textproto.TrimString(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(http.Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(rp.header, "Trailer")
-
- var url_ *url.URL
- var requestURI string
- if rp.method == "CONNECT" && rp.protocol == "" {
- url_ = &url.URL{Host: rp.authority}
- requestURI = rp.authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(rp.path)
- if err != nil {
- return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
- }
- requestURI = rp.path
+ res := httpcommon.NewServerRequest(rp)
+ if res.InvalidReason != "" {
+ return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
}
body := &requestBody{
conn: sc,
stream: st,
- needsContinue: needsContinue,
+ needsContinue: res.NeedsContinue,
}
- req := &http.Request{
- Method: rp.method,
- URL: url_,
+ req := (&http.Request{
+ Method: rp.Method,
+ URL: res.URL,
RemoteAddr: sc.remoteAddrStr,
- Header: rp.header,
- RequestURI: requestURI,
+ Header: rp.Header,
+ RequestURI: res.RequestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
- Host: rp.authority,
+ Host: rp.Authority,
Body: body,
- Trailer: trailer,
- }
- req = req.WithContext(st.ctx)
-
+ Trailer: res.Trailer,
+ }).WithContext(st.ctx)
rw := sc.newResponseWriter(st, req)
return rw, req, nil
}
@@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
- rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
- method: msg.method,
- scheme: msg.url.Scheme,
- authority: msg.url.Host,
- path: msg.url.RequestURI(),
- header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+ Method: msg.method,
+ Scheme: msg.url.Scheme,
+ Authority: msg.url.Host,
+ Path: msg.url.RequestURI(),
+ Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 090d0e1bd..f26356b9c 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "sort"
"strconv"
"strings"
"sync"
@@ -35,6 +34,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
+ "golang.org/x/net/internal/httpcommon"
)
const (
@@ -375,6 +375,7 @@ type ClientConn struct {
doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
+ closedOnIdle bool // true if conn was closed for idleness
seenSettings bool // true if we've seen a settings frame, false otherwise
seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
@@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
+ // If the conn was closed for idleness, we're racing the idle timer;
+ // don't try to use the conn. (Issue #70515.)
//
// This avoids a situation where an error early in a connection's lifetime
// goes unreported.
- if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
st.canTakeNewRequest = true
}
@@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() {
return
}
cc.closed = true
+ cc.closedOnIdle = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
@@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
-func commaSeparatedTrailers(req *http.Request) (string, error) {
- keys := make([]string, 0, len(req.Trailer))
- for k := range req.Trailer {
- k = canonicalHeader(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", fmt.Errorf("invalid Trailer key %q", k)
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- return strings.Join(keys, ","), nil
- }
- return "", nil
-}
-
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
@@ -1299,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
return 0
}
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
- if v := req.Header.Get("Upgrade"); v != "" {
- return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
- }
- if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
- return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
- }
- if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
- return fmt.Errorf("http2: invalid Connection request header: %q", vv)
- }
- return nil
-}
-
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
@@ -1360,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- !cs.isHead {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
- }
+ cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
go cs.doRequest(req, streamf)
@@ -1492,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx
- if err := checkConnHeaders(req); err != nil {
- return err
- }
-
// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@@ -1659,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
- trailers, err := commaSeparatedTrailers(req)
- if err != nil {
- return err
- }
- hasTrailers := trailers != ""
- contentLen := actualContentLength(req)
- hasBody := contentLen != 0
- hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
+ cc.hbuf.Reset()
+ res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
+ cc.writeHeader(name, value)
+ })
if err != nil {
- return err
+ return fmt.Errorf("http2: %w", err)
}
+ hdrs := cc.hbuf.Bytes()
// Write the request.
- endStream := !hasBody && !hasTrailers
+ endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
return err
}
+func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
+ return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
+ Request: httpcommon.Request{
+ Header: req.Header,
+ Trailer: req.Trailer,
+ URL: req.URL,
+ Host: req.Host,
+ Method: req.Method,
+ ActualContentLength: actualContentLength(req),
+ },
+ AddGzipHeader: addGzipHeader,
+ PeerMaxHeaderListSize: peerMaxHeaderListSize,
+ DefaultUserAgent: defaultUserAgent,
+ }, headerf)
+}
+
// cleanupWriteRequest performs post-request tasks.
//
// If err (the result of writeRequest) is non-nil and the stream is not closed,
@@ -2066,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
-func validateHeaders(hdrs http.Header) string {
- for k, vv := range hdrs {
- if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
- return fmt.Sprintf("name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- // Don't include the value in the error,
- // because it may be sensitive.
- return fmt.Sprintf("value for header %q", k)
- }
- }
- }
- return ""
-}
-
-var errNilRequestURL = errors.New("http2: Request.URI is nil")
-
-func isNormalConnect(req *http.Request) bool {
- return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
-}
-
-// requires cc.wmu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
- cc.hbuf.Reset()
- if req.URL == nil {
- return nil, errNilRequestURL
- }
-
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
- host, err := httpguts.PunycodeHostPort(host)
- if err != nil {
- return nil, err
- }
- if !httpguts.ValidHostHeader(host) {
- return nil, errors.New("http2: invalid Host header")
- }
-
- var path string
- if !isNormalConnect(req) {
- path = req.URL.RequestURI()
- if !validPseudoPath(path) {
- orig := path
- path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
- if !validPseudoPath(path) {
- if req.URL.Opaque != "" {
- return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
- } else {
- return nil, fmt.Errorf("invalid request :path %q", orig)
- }
- }
- }
- }
-
- // Check for any invalid headers+trailers and return an error before we
- // potentially pollute our hpack state. (We want to be able to
- // continue to reuse the hpack encoder for future requests)
- if err := validateHeaders(req.Header); err != "" {
- return nil, fmt.Errorf("invalid HTTP header %s", err)
- }
- if err := validateHeaders(req.Trailer); err != "" {
- return nil, fmt.Errorf("invalid HTTP trailer %s", err)
- }
-
- enumerateHeaders := func(f func(name, value string)) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production, see Sections 3.3 and 3.4 of
- // [RFC3986]).
- f(":authority", host)
- m := req.Method
- if m == "" {
- m = http.MethodGet
- }
- f(":method", m)
- if !isNormalConnect(req) {
- f(":path", path)
- f(":scheme", req.URL.Scheme)
- }
- if trailers != "" {
- f("trailer", trailers)
- }
-
- var didUA bool
- for k, vv := range req.Header {
- if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- } else if asciiEqualFold(k, "connection") ||
- asciiEqualFold(k, "proxy-connection") ||
- asciiEqualFold(k, "transfer-encoding") ||
- asciiEqualFold(k, "upgrade") ||
- asciiEqualFold(k, "keep-alive") {
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We have already checked if any
- // are error-worthy so just ignore the rest.
- continue
- } else if asciiEqualFold(k, "user-agent") {
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
- } else if asciiEqualFold(k, "cookie") {
- // Per 8.1.2.5 To allow for better compression efficiency, the
- // Cookie header field MAY be split into separate header fields,
- // each with one or more cookie-pairs.
- for _, v := range vv {
- for {
- p := strings.IndexByte(v, ';')
- if p < 0 {
- break
- }
- f("cookie", v[:p])
- p++
- // strip space after semicolon if any.
- for p+1 <= len(v) && v[p] == ' ' {
- p++
- }
- v = v[p:]
- }
- if len(v) > 0 {
- f("cookie", v)
- }
- }
- continue
- }
-
- for _, v := range vv {
- f(k, v)
- }
- }
- if shouldSendReqContentLength(req.Method, contentLength) {
- f("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- f("accept-encoding", "gzip")
- }
- if !didUA {
- f("user-agent", defaultUserAgent)
- }
- }
-
- // Do a first pass over the headers counting bytes to ensure
- // we don't exceed cc.peerMaxHeaderListSize. This is done as a
- // separate pass before encoding the headers to prevent
- // modifying the hpack state.
- hlSize := uint64(0)
- enumerateHeaders(func(name, value string) {
- hf := hpack.HeaderField{Name: name, Value: value}
- hlSize += uint64(hf.Size())
- })
-
- if hlSize > cc.peerMaxHeaderListSize {
- return nil, errRequestHeaderListSize
- }
-
- trace := httptrace.ContextClientTrace(req.Context())
- traceHeaders := traceHasWroteHeaderField(trace)
-
- // Header list size is ok. Write the headers.
- enumerateHeaders(func(name, value string) {
- name, ascii := lowerHeader(name)
- if !ascii {
- // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
- // field names have to be ASCII characters (just as in HTTP/1.x).
- return
- }
- cc.writeHeader(name, value)
- if traceHeaders {
- traceWroteHeaderField(trace, name, value)
- }
- })
-
- return cc.hbuf.Bytes(), nil
-}
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
- if contentLength > 0 {
- return true
- }
- if contentLength < 0 {
- return false
- }
- // For zero bodies, whether we send a content-length depends on the method.
- // It also kinda doesn't matter for http2 either way, with END_STREAM.
- switch method {
- case "POST", "PUT", "PATCH":
- return true
- default:
- return false
- }
-}
-
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
@@ -2294,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}
for k, vv := range trailer {
- lowKey, ascii := lowerHeader(k)
+ lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2434,9 +2184,12 @@ func (rl *clientConnReadLoop) cleanup() {
// This avoids a situation where new connections are constantly created,
// added to the pool, fail, and are removed from the pool, without any error
// being surfaced to the user.
- const unusedWaitTime = 5 * time.Second
+ unusedWaitTime := 5 * time.Second
+ if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
+ unusedWaitTime = cc.idleTimeout
+ }
idleTime := cc.t.now().Sub(cc.lastActive)
- if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
@@ -2457,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.cond.Broadcast()
cc.mu.Unlock()
+
+ if !cc.seenSettings {
+ // If we have a pending request that wants extended CONNECT,
+ // let it continue and fail with the connection error.
+ cc.extendedConnectAllowed = true
+ close(cc.seenSettingsChan)
+ }
}
// countReadFrameError calls Transport.CountError with a string
@@ -2549,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
- if !cc.seenSettings {
- close(cc.seenSettingsChan)
- }
return err
}
}
@@ -2646,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
- key := canonicalHeader(hf.Name)
+ key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@@ -2654,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
- t[canonicalHeader(v)] = nil
+ t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]
@@ -2778,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
trailer := make(http.Header)
for _, hf := range f.RegularFields() {
- key := canonicalHeader(hf.Name)
+ key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@@ -3324,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
- errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
+ errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -3508,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
- return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(k, []string{v})
- }
-}
-
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
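
Two of the transport changes above surface through existing knobs: requestedGzip is now derived via httpcommon.IsRequestGzip from the transport's DisableCompression setting, and the idle-close path (the new closedOnIdle bookkeeping) is driven by the idle timeout. A minimal client sketch, with illustrative values:

```go
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// Leaving compression enabled lets the new httpcommon.IsRequestGzip
		// path add "accept-encoding: gzip" where appropriate.
		DisableCompression: false,
		// Connections unused for this long are closed for idleness, which is
		// what closedOnIdle now tracks.
		IdleConnTimeout: 30 * time.Second,
	}
	client := &http.Client{Transport: t, Timeout: 10 * time.Second}
	_ = client // use client.Get(...) against an HTTPS endpoint that speaks h2
}
```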
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 6ff6bee7e..fdb35b947 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
)
// writeFramer is implemented by any type that is used to write frames.
@@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
- k, ascii := lowerHeader(k)
+ k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
new file mode 100644
index 000000000..ed14da5af
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if lower(s[i]) != lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+ if !isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
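
Because httpcommon is internal it cannot be imported directly, so the sketch below copies asciiEqualFold from the file above purely to illustrate why the ASCII-only fold matters: Unicode-aware folding treats U+212A (the Kelvin sign) as "k", while the ASCII helper does not.

```go
package main

import (
	"fmt"
	"strings"
)

// Local copy of asciiEqualFold from the vendored file above, for illustration
// only (the real helper lives in an internal package).
func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

func main() {
	// U+212A KELVIN SIGN folds to "k" under Unicode rules but not ASCII rules,
	// which is exactly the mismatch the ASCII-only helpers avoid.
	kelvin := "\u212a"
	fmt.Println(strings.EqualFold(kelvin, "k")) // true
	fmt.Println(asciiEqualFold(kelvin, "k"))    // false
}
```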
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
similarity index 74%
rename from vendor/golang.org/x/net/http2/headermap.go
rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go
index 149b3dd20..92483d8e4 100644
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -1,11 +1,11 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package http2
+package httpcommon
import (
- "net/http"
+ "net/textproto"
"sync"
)
@@ -82,13 +82,15 @@ func buildCommonHeaderMaps() {
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
- chk := http.CanonicalHeaderKey(v)
+ chk := textproto.CanonicalMIMEHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}
-func lowerHeader(v string) (lower string, ascii bool) {
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
+func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
@@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}
-func canonicalHeader(v string) string {
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
- return http.CanonicalHeaderKey(v)
+ return textproto.CanonicalMIMEHeaderKey(v)
+}
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+ buildCommonHeaderMapsOnce()
+ s, ok := commonCanonHeader[v]
+ return s, ok
}
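
The renamed helpers are likewise internal, but the diff shows their uncached fallback is now net/textproto rather than net/http; a small sketch of the equivalent standard-library behavior:

```go
package main

import (
	"fmt"
	"net/textproto"
	"strings"
)

func main() {
	// CanonicalHeader falls back to CanonicalMIMEHeaderKey for names outside
	// its cache of common headers; LowerHeader lowercases printable ASCII
	// names into the HTTP/2 and HTTP/3 wire form.
	fmt.Println(textproto.CanonicalMIMEHeaderKey("x-request-id")) // X-Request-Id
	fmt.Println(strings.ToLower("Content-Type"))                  // content-type
}
```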
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 000000000..4b7055317
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,467 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+)
+
+var (
+ ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// Request is a subset of http.Request.
+// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
+// without creating a dependency cycle.
+type Request struct {
+ URL *url.URL
+ Method string
+ Host string
+ Header map[string][]string
+ Trailer map[string][]string
+ ActualContentLength int64 // 0 means 0, -1 means unknown
+}
+
+// EncodeHeadersParam holds the parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+ Request Request
+
+ // AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+ // added to the request.
+ AddGzipHeader bool
+
+ // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+ PeerMaxHeaderListSize uint64
+
+ // DefaultUserAgent is the User-Agent header to send when the request
+ // neither contains a User-Agent nor disables it.
+ DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+ HasBody bool
+ HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, canonicalized header name.
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+ req := param.Request
+
+ // Check for invalid connection-level headers.
+ if err := checkConnHeaders(req.Header); err != nil {
+ return res, err
+ }
+
+ if req.URL == nil {
+ return res, errors.New("Request.URL is nil")
+ }
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return res, err
+ }
+ if !httpguts.ValidHostHeader(host) {
+ return res, errors.New("invalid Host header")
+ }
+
+ // isNormalConnect is true if this is a non-extended CONNECT request.
+ isNormalConnect := false
+ var protocol string
+ if vv := req.Header[":protocol"]; len(vv) > 0 {
+ protocol = vv[0]
+ }
+ if req.Method == "CONNECT" && protocol == "" {
+ isNormalConnect = true
+ } else if protocol != "" && req.Method != "CONNECT" {
+ return res, errors.New("invalid :protocol header in non-CONNECT request")
+ }
+
+ // Validate the path, except for non-extended CONNECT requests which have no path.
+ var path string
+ if !isNormalConnect {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return res, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers+trailers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ if err := validateHeaders(req.Header); err != "" {
+ return res, fmt.Errorf("invalid HTTP header %s", err)
+ }
+ if err := validateHeaders(req.Trailer); err != "" {
+ return res, fmt.Errorf("invalid HTTP trailer %s", err)
+ }
+
+ trailers, err := commaSeparatedTrailers(req.Trailer)
+ if err != nil {
+ return res, err
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production, see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = "GET"
+ }
+ f(":method", m)
+ if !isNormalConnect {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if protocol != "" {
+ f(":protocol", protocol)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if asciiEqualFold(k, "connection") ||
+ asciiEqualFold(k, "proxy-connection") ||
+ asciiEqualFold(k, "transfer-encoding") ||
+ asciiEqualFold(k, "upgrade") ||
+ asciiEqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if asciiEqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ } else if asciiEqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
+ } else if k == ":protocol" {
+ // :protocol pseudo-header was already sent above.
+ continue
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
+ f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
+ }
+ if param.AddGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", param.DefaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ if param.PeerMaxHeaderListSize > 0 {
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > param.PeerMaxHeaderListSize {
+ return res, ErrRequestHeaderListSize
+ }
+ }
+
+ trace := httptrace.ContextClientTrace(ctx)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name, ascii := LowerHeader(name)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ return
+ }
+
+ headerf(name, value)
+
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(name, []string{value})
+ }
+ })
+
+ res.HasBody = req.ActualContentLength != 0
+ res.HasTrailers = trailers != ""
+ return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !disableCompression &&
+ len(header["Accept-Encoding"]) == 0 &&
+ len(header["Range"]) == 0 &&
+ method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ return true
+ }
+ return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
+func checkConnHeaders(h map[string][]string) error {
+ if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Upgrade request header: %q", vv)
+ }
+ if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
+ keys := make([]string, 0, len(trailer))
+ for k := range trailer {
+ k = CanonicalHeader(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", fmt.Errorf("invalid Trailer key %q", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs map[string][]string) string {
+ for k, vv := range hdrs {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+ return fmt.Sprintf("name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error,
+ // because it may be sensitive.
+ return fmt.Sprintf("value for header %q", k)
+ }
+ }
+ }
+ return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
+// ServerRequestParam contains the parameters to NewServerRequest.
+type ServerRequestParam struct {
+ Method string
+ Scheme, Authority, Path string
+ Protocol string
+ Header map[string][]string
+}
+
+// ServerRequestResult is the result of NewServerRequest.
+type ServerRequestResult struct {
+ // Various http.Request fields.
+ URL *url.URL
+ RequestURI string
+ Trailer map[string][]string
+
+ NeedsContinue bool // client provided an "Expect: 100-continue" header
+
+ // If the request should be rejected, this is a short string suitable for passing
+ // to the http2 package's CountError function.
+ // It might be a bit odd to return errors this way rather than returning an error,
+ // but this ensures we don't forget to include a CountError reason.
+ InvalidReason string
+}
+
+func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
+ needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
+ if needsContinue {
+ delete(rp.Header, "Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
+ rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
+ }
+
+ // Setup Trailers
+ var trailer map[string][]string
+ for _, v := range rp.Header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(map[string][]string)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.Header, "Trailer")
+
+ // "':authority' MUST NOT include the deprecated userinfo subcomponent
+ // for "http" or "https" schemed URIs."
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
+ if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
+ return ServerRequestResult{
+ InvalidReason: "userinfo_in_authority",
+ }
+ }
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.Method == "CONNECT" && rp.Protocol == "" {
+ url_ = &url.URL{Host: rp.Authority}
+ requestURI = rp.Authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.Path)
+ if err != nil {
+ return ServerRequestResult{
+ InvalidReason: "bad_path",
+ }
+ }
+ requestURI = rp.Path
+ }
+
+ return ServerRequestResult{
+ URL: url_,
+ NeedsContinue: needsContinue,
+ RequestURI: requestURI,
+ Trailer: trailer,
+ }
+}
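The httpcommon package above is internal to golang.org/x/net and cannot be imported directly, but the pre-flight size check in EncodeHeaders can be reproduced with the importable hpack package. A minimal sketch, assuming a hypothetical header map and an assumed peer limit of 8192 octets; the accounting (name length + value length + 32 per field) matches hpack.HeaderField.Size:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerListSize mirrors the first pass over enumerateHeaders above:
// it sums the SETTINGS_MAX_HEADER_LIST_SIZE cost of every field.
func headerListSize(headers map[string][]string) uint64 {
	var size uint64
	for name, values := range headers {
		for _, v := range values {
			hf := hpack.HeaderField{Name: name, Value: v}
			size += uint64(hf.Size())
		}
	}
	return size
}

func main() {
	h := map[string][]string{
		"user-agent": {"example/1.0"},
		"accept":     {"*/*"},
	}
	// 8192 stands in for a peer's advertised MAX_HEADER_LIST_SIZE.
	const peerMax = 8192
	fmt.Println(headerListSize(h) <= peerMax)
}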
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
index 781770c20..48dbb9d84 100644
--- a/vendor/golang.org/x/oauth2/README.md
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -5,15 +5,6 @@
oauth2 package contains a client implementation for OAuth 2.0 spec.
-## Installation
-
-~~~~
-go get golang.org/x/oauth2
-~~~~
-
-Or you can manually git clone the repository to
-`$(go env GOPATH)/src/golang.org/x/oauth2`.
-
See pkg.go.dev for further documentation and examples.
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
@@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html. In particular:
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/oauth2.
+
+Note:
* Excluding trivial changes, all contributions should be connected to an existing issue.
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index df958359a..0260935ba 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -251,6 +251,12 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials
// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh
// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud
// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation).
+//
+// Important: If you accept a credential configuration (credential JSON/File/Stream) from an
+// external source for authentication to Google Cloud Platform, you must validate it before
+// providing it to any Google API or library. Providing an unvalidated credential configuration to
+// Google APIs can compromise the security of your systems and data. For more information, refer to
+// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) {
// Make defensive copy of the slices in params.
params = params.deepCopy()
@@ -294,6 +300,12 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params
}
// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes.
+//
+// Important: If you accept a credential configuration (credential JSON/File/Stream) from an
+// external source for authentication to Google Cloud Platform, you must validate it before
+// providing it to any Google API or library. Providing an unvalidated credential configuration to
+// Google APIs can compromise the security of your systems and data. For more information, refer to
+// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
var params CredentialsParams
params.Scopes = scopes
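For context on the warning added above, a minimal sketch of loading externally supplied credential JSON; the file name and scope are placeholders, and the point is that such JSON should be validated before being passed to the library:

package main

import (
	"context"
	"fmt"
	"os"

	"golang.org/x/oauth2/google"
)

func main() {
	// Placeholder path and scope for illustration. Per the doc comments
	// added above, credential JSON accepted from an external source
	// should be validated before being handed to this call.
	data, err := os.ReadFile("credentials.json")
	if err != nil {
		fmt.Println("read credentials:", err)
		return
	}
	creds, err := google.CredentialsFromJSON(context.Background(), data,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		fmt.Println("parse credentials:", err)
		return
	}
	fmt.Println("project:", creds.ProjectID)
}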
diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go
index ca27c2e98..55d59999e 100644
--- a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go
+++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go
@@ -28,7 +28,7 @@ import (
// AwsSecurityCredentials models AWS security credentials.
type AwsSecurityCredentials struct {
- // AccessKeyId is the AWS Access Key ID - Required.
+ // AccessKeyID is the AWS Access Key ID - Required.
AccessKeyID string `json:"AccessKeyID"`
// SecretAccessKey is the AWS Secret Access Key - Required.
SecretAccessKey string `json:"SecretAccessKey"`
diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go
index 6c81a6872..aa0bba2eb 100644
--- a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go
+++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go
@@ -263,7 +263,7 @@ const (
fileTypeJSON = "json"
)
-// Format contains information needed to retireve a subject token for URL or File sourced credentials.
+// Format contains information needed to retrieve a subject token for URL or File sourced credentials.
type Format struct {
// Type should be either "text" or "json". This determines whether the file or URL sourced credentials
// expect a simple text subject token or if the subject token will be contained in a JSON object.
@@ -278,20 +278,52 @@ type Format struct {
type CredentialSource struct {
// File is the location for file sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
File string `json:"file"`
// Url is the URL to call for URL sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
URL string `json:"url"`
// Headers are the headers to attach to the request for URL sourced credentials.
Headers map[string]string `json:"headers"`
// Executable is the configuration object for executable sourced credentials.
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
Executable *ExecutableConfig `json:"executable"`
// EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS".
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
EnvironmentID string `json:"environment_id"`
// RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials.
RegionURL string `json:"region_url"`
@@ -329,7 +361,7 @@ type SubjectTokenSupplier interface {
type AwsSecurityCredentialsSupplier interface {
// AwsRegion should return the AWS region or an error.
AwsRegion(ctx context.Context, options SupplierOptions) (string, error)
- // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
+ // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
// The external account token source does not cache the returned security credentials, so caching
// logic should be implemented in the supplier to prevent multiple requests for the same security credentials.
AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error)
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
index 95015648b..6f03a49d3 100644
--- a/vendor/golang.org/x/oauth2/jws/jws.go
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -165,11 +165,11 @@ func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
// Verify tests whether the provided JWT token's signature was produced by the private key
// associated with the supplied public key.
func Verify(token string, key *rsa.PublicKey) error {
- parts := strings.Split(token, ".")
- if len(parts) != 3 {
+ if strings.Count(token, ".") != 2 {
return errors.New("jws: invalid token received, token must have 3 parts")
}
+ parts := strings.SplitN(token, ".", 3)
signedContent := parts[0] + "." + parts[1]
signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
if err != nil {
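The Verify change above is behavior-preserving: a token must still consist of exactly three dot-separated parts. A small self-contained sketch that signs a claim set with a freshly generated RSA key and verifies it (issuer and audience values are placeholders):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/oauth2/jws"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss: "issuer@example.com",
		Aud: "https://oauth2.example.com/token",
	}

	token, err := jws.Encode(header, claims, key)
	if err != nil {
		panic(err)
	}
	// Verify still rejects anything that is not exactly three
	// dot-separated parts; the change above only avoids splitting
	// the signature payload more than necessary.
	fmt.Println(jws.Verify(token, &key.PublicKey))
}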
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 09f6a49b8..eacdd7fd9 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -56,7 +56,7 @@ type Config struct {
// the OAuth flow, after the resource owner's URLs.
RedirectURL string
- // Scope specifies optional requested permissions.
+ // Scopes specifies optional requested permissions.
Scopes []string
// authStyleCache caches which auth style to use when Endpoint.AuthStyle is
@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
if tf.refreshToken != tk.RefreshToken {
tf.refreshToken = tk.RefreshToken
}
- return tk, err
+ return tk, nil
}
// reuseTokenSource is a TokenSource that holds a single token in memory
@@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
return internal.ContextClient(ctx)
}
+ cc := internal.ContextClient(ctx)
return &http.Client{
Transport: &Transport{
- Base: internal.ContextClient(ctx).Transport,
+ Base: cc.Transport,
Source: ReuseTokenSource(nil, src),
},
+ CheckRedirect: cc.CheckRedirect,
+ Jar: cc.Jar,
+ Timeout: cc.Timeout,
}
}
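With the NewClient change above, a base client supplied through the oauth2.HTTPClient context key now also contributes its CheckRedirect, Jar, and Timeout, not only its Transport. A minimal sketch with a placeholder token:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Base client carrying a timeout, passed via the oauth2.HTTPClient
	// context key.
	base := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, base)

	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	client := oauth2.NewClient(ctx, src)

	// With this oauth2 version, the timeout is propagated to the
	// returned client rather than being dropped.
	fmt.Println(client.Timeout)
}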
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
index 50593b6df..6a95da975 100644
--- a/vendor/golang.org/x/oauth2/pkce.go
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -21,7 +21,7 @@ const (
//
// A fresh verifier should be generated for each authorization.
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAccessToken).
func GenerateVerifier() string {
// "RECOMMENDED that the output of a suitable random number generator be
@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
}
// S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
+// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
return challengeOption{
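To illustrate the corrected doc comments (DeviceAuth, not DeviceAccess), a minimal PKCE sketch; the client ID, endpoint URLs, and authorization code are placeholders:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	// Placeholder configuration for illustration only.
	conf := &oauth2.Config{
		ClientID:    "example-client-id",
		RedirectURL: "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize",
			TokenURL: "https://auth.example.com/token",
		},
	}

	// One fresh verifier per authorization, as the doc comment requires.
	verifier := oauth2.GenerateVerifier()

	// S256ChallengeOption accompanies AuthCodeURL (or DeviceAuth)...
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// ...and VerifierOption accompanies Exchange (or DeviceAccessToken).
	code := "code-from-callback" // placeholder
	if _, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier)); err != nil {
		fmt.Println("exchange failed:", err)
	}
}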
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5bbb33217..109997d77 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
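The new ExpiresIn field documents that populating Expiry is the application's job. A minimal sketch, assuming a token decoded straight from the wire format:

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// A token decoded from a wire response: ExpiresIn carries the raw
	// "expires_in" seconds and Expiry is left unset by the decoder.
	tok := &oauth2.Token{AccessToken: "example-token", ExpiresIn: 3600}

	// Per the field docs above, the application populates Expiry itself,
	// using "now" as the approximate time base.
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	fmt.Println(tok.Valid(), tok.Expiry)
}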
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 02609d5b2..2e73ee197 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -72,6 +72,9 @@ var X86 struct {
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
+ HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
+ HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
+ HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
_ CacheLinePad
}
@@ -146,6 +149,18 @@ var ARM struct {
_ CacheLinePad
}
+// The booleans in Loong64 contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var Loong64 struct {
+ _ CacheLinePad
+ HasLSX bool // support 128-bit vector extension
+ HasLASX bool // support 256-bit vector extension
+ HasCRC32 bool // support CRC instruction
+ HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction
+ HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction
+ _ CacheLinePad
+}
+
// MIPS64X contains the supported CPU features of the current mips64/mips64le
// platforms. If the current platform is not mips64/mips64le or the current
// operating system is not Linux then all feature flags are false.
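The new X86 and Loong64 feature flags are read the same way as the existing ones. A small sketch checking a few of the fields added above on the architectures they apply to:

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/cpu"
)

func main() {
	switch runtime.GOARCH {
	case "amd64", "386":
		fmt.Println("AVX-VNNI:     ", cpu.X86.HasAVXVNNI)
		fmt.Println("AVX-IFMA:     ", cpu.X86.HasAVXIFMA)
		fmt.Println("AVX-VNNI-INT8:", cpu.X86.HasAVXVNNIInt8)
	case "loong64":
		fmt.Println("LSX:  ", cpu.Loong64.HasLSX)
		fmt.Println("LASX: ", cpu.Loong64.HasLASX)
		fmt.Println("CRC32:", cpu.Loong64.HasCRC32)
	}
}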
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
new file mode 100644
index 000000000..4f3411432
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+// HWCAP bits. These are exposed by the Linux kernel.
+const (
+ hwcap_LOONGARCH_LSX = 1 << 4
+ hwcap_LOONGARCH_LASX = 1 << 5
+)
+
+func doinit() {
+ // TODO: Features that require kernel support like LSX and LASX can
+ // be detected here once needed in std library or by the compiler.
+ Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX)
+ Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX)
+}
+
+func hwcIsSet(hwc uint, val uint) bool {
+ return hwc&val != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index 7d902b684..a428dec9c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
+//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
index 558635850..45ecb29ae 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
@@ -8,5 +8,43 @@ package cpu
const cacheLineSize = 64
+// Bit fields for CPUCFG registers, Related reference documents:
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
+const (
+ // CPUCFG1 bits
+ cpucfg1_CRC32 = 1 << 25
+
+ // CPUCFG2 bits
+ cpucfg2_LAM_BH = 1 << 27
+ cpucfg2_LAMCAS = 1 << 28
+)
+
func initOptions() {
+ options = []option{
+ {Name: "lsx", Feature: &Loong64.HasLSX},
+ {Name: "lasx", Feature: &Loong64.HasLASX},
+ {Name: "crc32", Feature: &Loong64.HasCRC32},
+ {Name: "lam_bh", Feature: &Loong64.HasLAM_BH},
+ {Name: "lamcas", Feature: &Loong64.HasLAMCAS},
+ }
+
+ // The CPUCFG data on Loong64 only reflects the hardware capabilities,
+ // not the kernel support status, so features such as LSX and LASX that
+ // require kernel support cannot be obtained from the CPUCFG data.
+ //
+ // These features only require hardware capability support and do not
+ // require kernel specific support, so they can be obtained directly
+ // through CPUCFG
+ cfg1 := get_cpucfg(1)
+ cfg2 := get_cpucfg(2)
+
+ Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32)
+ Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS)
+ Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH)
+}
+
+func get_cpucfg(reg uint32) uint32
+
+func cfgIsSet(cfg uint32, val uint32) bool {
+ return cfg&val != 0
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
new file mode 100644
index 000000000..71cbaf1ce
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func get_cpucfg(reg uint32) uint32
+TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0
+ MOVW reg+0(FP), R5
+ // CPUCFG R5, R4 = 0x00006ca4
+ WORD $0x00006ca4
+ MOVW R4, ret+8(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 600a68078..1e642f330 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -53,6 +53,9 @@ func initOptions() {
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
+ {Name: "avxifma", Feature: &X86.HasAVXIFMA},
+ {Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+ {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@@ -106,7 +109,7 @@ func archInit() {
return
}
- _, ebx7, ecx7, edx7 := cpuid(7, 0)
+ eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
@@ -134,14 +137,24 @@ func archInit() {
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
-
- eax71, _, _, _ := cpuid(7, 1)
- X86.HasAVX512BF16 = isSet(5, eax71)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
+
+ // These features depend on the second level of extended features.
+ if eax7 >= 1 {
+ eax71, _, _, edx71 := cpuid(7, 1)
+ if X86.HasAVX512 {
+ X86.HasAVX512BF16 = isSet(5, eax71)
+ }
+ if X86.HasAVX {
+ X86.HasAVXIFMA = isSet(23, eax71)
+ X86.HasAVXVNNI = isSet(4, eax71)
+ X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ }
+ }
}
func isSet(bitpos uint, value uint32) bool {
diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go
index 762b63d68..56a7e1a17 100644
--- a/vendor/golang.org/x/sys/cpu/parse.go
+++ b/vendor/golang.org/x/sys/cpu/parse.go
@@ -13,7 +13,7 @@ import "strconv"
// https://golang.org/cl/209597.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
// Strip anything after a dash or plus.
- for i := 0; i < len(rel); i++ {
+ for i := range len(rel) {
if rel[i] == '-' || rel[i] == '+' {
rel = rel[:i]
break
@@ -21,7 +21,7 @@ func parseRelease(rel string) (major, minor, patch int, ok bool) {
}
next := func() (int, bool) {
- for i := 0; i < len(rel); i++ {
+ for i := range len(rel) {
if rel[i] == '.' {
ver, err := strconv.Atoi(rel[:i])
rel = rel[i+1:]
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 000000000..37a82528f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
+func Auxv() ([][2]uintptr, error) {
+ vec := runtime_getAuxv()
+ vecLen := len(vec)
+
+ if vecLen == 0 {
+ return nil, syscall.ENOENT
+ }
+
+ if vecLen%2 != 0 {
+ return nil, syscall.EINVAL
+ }
+
+ result := make([]uintptr, vecLen)
+ copy(result, vec)
+ return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
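A minimal sketch of the new Auxv API; it simply dumps the key/value pairs and falls back gracefully where the vector is unavailable (non-ELF platforms, locked-down environments, or pre-1.21 builds):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs, err := unix.Auxv()
	if err != nil {
		fmt.Println("auxv unavailable:", err)
		return
	}
	for _, kv := range pairs {
		// kv[0] is the AT_* tag, kv[1] the associated value.
		fmt.Printf("key=%d value=%#x\n", kv[0], kv[1])
	}
}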
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 000000000..1200487f2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+ return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 099867dee..798f61ad3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,7 +602,150 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return
}
-//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
+// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
+const minIovec = 8
+
+func Readv(fd int, iovs [][]byte) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = readv(fd, iovecs)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = preadv(fd, iovecs, offset)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Writev(fd int, iovs [][]byte) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = writev(fd, iovecs)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = pwritev(fd, iovecs, offset)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+ for _, b := range bs {
+ var v Iovec
+ v.SetLen(len(b))
+ if len(b) > 0 {
+ v.Base = &b[0]
+ } else {
+ v.Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ vecs = append(vecs, v)
+ }
+ return vecs
+}
+
+func writevRacedetect(iovecs []Iovec, n int) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+}
+
+func readvRacedetect(iovecs []Iovec, n int, err error) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+}
+
+func darwinMajorMinPatch() (maj, min, patch int, err error) {
+ var un Utsname
+ err = Uname(&un)
+ if err != nil {
+ return
+ }
+
+ var mmp [3]int
+ c := 0
+Loop:
+ for _, b := range un.Release[:] {
+ switch {
+ case b >= '0' && b <= '9':
+ mmp[c] = 10*mmp[c] + int(b-'0')
+ case b == '.':
+ c++
+ if c > 2 {
+ return 0, 0, 0, ENOTSUP
+ }
+ case b == 0:
+ break Loop
+ default:
+ return 0, 0, 0, ENOTSUP
+ }
+ }
+ if c != 2 {
+ return 0, 0, 0, ENOTSUP
+ }
+ return mmp[0], mmp[1], mmp[2], nil
+}
+
+func darwinKernelVersionMin(maj, min, patch int) bool {
+ actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
+ if err != nil {
+ return false
+ }
+ return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
+}
+
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
@@ -705,3 +848,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readv(fd int, iovecs []Iovec) (n int, err error)
+//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error)
+//sys writev(fd int, iovecs []Iovec) (n int, err error)
+//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error)
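A minimal sketch of the Darwin vectored-write wrappers added above, assuming the x/sys version vendored here; on kernels older than Darwin 11 these wrappers return ENOSYS:

//go:build darwin

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "writev-example-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Writev gathers both buffers into a single system call.
	n, err := unix.Writev(int(f.Fd()), [][]byte{
		[]byte("hello, "),
		[]byte("world\n"),
	})
	fmt.Println(n, err)
}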
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 97cb916f2..be8c00207 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
+func Dup3(oldfd, newfd, flags int) error {
+ if oldfd == newfd || flags&^O_CLOEXEC != 0 {
+ return EINVAL
+ }
+ how := F_DUP2FD
+ if flags&O_CLOEXEC != 0 {
+ how = F_DUP2FD_CLOEXEC
+ }
+ _, err := fcntl(oldfd, how, newfd)
+ return err
+}
+
/*
* Exposed directly
*/
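A short sketch of Dup3 as now exposed on DragonFly (the same call already exists on Linux); descriptor 10 is an arbitrary target chosen for illustration:

//go:build dragonfly || linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Duplicate stdout onto descriptor 10 with close-on-exec set.
	// Dup3 rejects oldfd == newfd and any flag other than O_CLOEXEC.
	if err := unix.Dup3(1, 10, unix.O_CLOEXEC); err != nil {
		fmt.Println("dup3:", err)
		return
	}
	defer unix.Close(10)
	if _, err := unix.Write(10, []byte("written via the duplicated descriptor\n")); err != nil {
		fmt.Println("write:", err)
	}
}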
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 230a94549..4958a6570 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@ package unix
import (
"encoding/binary"
+ "slices"
"strconv"
"syscall"
"time"
@@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
return nil, 0, EINVAL
}
sa.raw.Family = AF_UNIX
- for i := 0; i < n; i++ {
+ for i := range n {
sa.raw.Path[i] = int8(name[i])
}
// length is family (uint16), name, NUL.
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
psm[0] = byte(sa.PSM)
psm[1] = byte(sa.PSM >> 8)
- for i := 0; i < len(sa.Addr); i++ {
+ for i := range len(sa.Addr) {
sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
}
cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
@@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i] = rx[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+4] = tx[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
@@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
n := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Addr[i] = n[i]
}
p := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+8] = p[i]
}
sa.raw.Addr[12] = sa.Addr
@@ -911,7 +912,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) {
// These are EBCDIC encoded by the kernel, but we still need to pad them
// with blanks. Initializing with blanks allows the caller to feed in either
// a padded or an unpadded string.
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Nodeid[i] = ' '
sa.raw.User_id[i] = ' '
sa.raw.Name[i] = ' '
@@ -1148,7 +1149,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
var user [8]byte
var name [8]byte
- for i := 0; i < 8; i++ {
+ for i := range 8 {
user[i] = byte(pp.User_id[i])
name[i] = byte(pp.Name[i])
}
@@ -1173,11 +1174,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
name := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
name[i] = pp.Addr[i]
}
pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
pgn[i] = pp.Addr[i+8]
}
addr := (*[1]byte)(unsafe.Pointer(&sa.Addr))
@@ -1188,11 +1189,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
rx[i] = pp.Addr[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
tx[i] = pp.Addr[i+4]
}
return sa, nil
@@ -2216,10 +2217,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2270,10 +2268,7 @@ func writevRacedetect(iovecs []Iovec, n int) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2320,12 +2315,7 @@ func isGroupMember(gid int) bool {
return false
}
- for _, g := range groups {
- if g == gid {
- return true
- }
- }
- return false
+ return slices.Contains(groups, gid)
}
func isCapDacOverrideSet() bool {
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 21974af06..abc395547 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}
+
+// Ucred Helpers
+// See ucred(3c) and getpeerucred(3c)
+
+//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
+//sys ucredFree(ucred uintptr) = ucred_free
+//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
+//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
+//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
+//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
+//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
+//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
+//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
+//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
+
+// Ucred is an opaque struct that holds user credentials.
+type Ucred struct {
+ ucred uintptr
+}
+
+// We need to ensure that ucredFree is called on the underlying ucred
+// when the Ucred is garbage collected.
+func ucredFinalizer(u *Ucred) {
+ ucredFree(u.ucred)
+}
+
+func GetPeerUcred(fd uintptr) (*Ucred, error) {
+ var ucred uintptr
+ err := getpeerucred(fd, &ucred)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func UcredGet(pid int) (*Ucred, error) {
+ ucred, err := ucredGet(pid)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func (u *Ucred) Geteuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGeteuid(u.ucred)
+}
+
+func (u *Ucred) Getruid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetruid(u.ucred)
+}
+
+func (u *Ucred) Getsuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsuid(u.ucred)
+}
+
+func (u *Ucred) Getegid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetegid(u.ucred)
+}
+
+func (u *Ucred) Getrgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetrgid(u.ucred)
+}
+
+func (u *Ucred) Getsgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsgid(u.ucred)
+}
+
+func (u *Ucred) Getpid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetpid(u.ucred)
+}
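A minimal sketch of the new Solaris ucred helpers, reading the peer credentials of a Unix-domain connection; the logPeer helper is illustrative only:

//go:build solaris

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

// logPeer prints the credentials of the process at the other end of a
// Unix-domain connection using the wrappers added above.
func logPeer(conn *net.UnixConn) {
	raw, err := conn.SyscallConn()
	if err != nil {
		return
	}
	_ = raw.Control(func(fd uintptr) {
		uc, err := unix.GetPeerUcred(fd)
		if err != nil {
			fmt.Println("getpeerucred:", err)
			return
		}
		fmt.Printf("peer pid=%d euid=%d egid=%d\n",
			uc.Getpid(), uc.Geteuid(), uc.Getegid())
	})
}

func main() {}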
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6ebc48b3f..4f432bfe8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1245,6 +1245,7 @@ const (
FAN_REPORT_DFID_NAME = 0xc00
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
FAN_REPORT_DIR_FID = 0x400
+ FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
@@ -1330,8 +1331,10 @@ const (
FUSE_SUPER_MAGIC = 0x65735546
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
+ F_CREATED_QUERY = 0x404
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
+ F_DUPFD_QUERY = 0x403
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
@@ -1551,6 +1554,7 @@ const (
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
+ IPPROTO_SMC = 0x100
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@@ -1623,6 +1627,8 @@ const (
IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
@@ -1867,6 +1873,7 @@ const (
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MADV_WIPEONFORK = 0x12
+ MAP_DROPPABLE = 0x8
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
@@ -1967,6 +1974,7 @@ const (
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
+ MSG_SOCK_DEVMEM = 0x2000000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
@@ -2083,6 +2091,7 @@ const (
NFC_ATR_REQ_MAXSIZE = 0x40
NFC_ATR_RES_GB_MAXSIZE = 0x2f
NFC_ATR_RES_MAXSIZE = 0x40
+ NFC_ATS_MAXSIZE = 0x14
NFC_COMM_ACTIVE = 0x0
NFC_COMM_PASSIVE = 0x1
NFC_DEVICE_NAME_MAXSIZE = 0x8
@@ -2163,6 +2172,7 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
+ NFT_BITWISE_BOOL = 0x0
NFT_CHAIN_FLAGS = 0x7
NFT_CHAIN_MAXNAMELEN = 0x100
NFT_CT_MAX = 0x17
@@ -2491,6 +2501,7 @@ const (
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
+ PR_GET_SHADOW_STACK_STATUS = 0x4a
PR_GET_SPECULATION_CTRL = 0x34
PR_GET_TAGGED_ADDR_CTRL = 0x38
PR_GET_THP_DISABLE = 0x2a
@@ -2499,6 +2510,7 @@ const (
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
+ PR_LOCK_SHADOW_STACK_STATUS = 0x4c
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
@@ -2525,6 +2537,8 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
+ PR_PMLEN_MASK = 0x7f000000
+ PR_PMLEN_SHIFT = 0x18
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
@@ -2592,6 +2606,7 @@ const (
PR_SET_PTRACER = 0x59616d61
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
+ PR_SET_SHADOW_STACK_STATUS = 0x4b
PR_SET_SPECULATION_CTRL = 0x35
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
PR_SET_TAGGED_ADDR_CTRL = 0x37
@@ -2602,6 +2617,9 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
+ PR_SHADOW_STACK_ENABLE = 0x1
+ PR_SHADOW_STACK_PUSH = 0x4
+ PR_SHADOW_STACK_WRITE = 0x2
PR_SME_GET_VL = 0x40
PR_SME_SET_VL = 0x3f
PR_SME_SET_VL_ONEXEC = 0x40000
@@ -2911,7 +2929,6 @@ const (
RTM_NEWNEXTHOP = 0x68
RTM_NEWNEXTHOPBUCKET = 0x74
RTM_NEWNSID = 0x58
- RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@@ -2920,6 +2937,7 @@ const (
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NEWTUNNEL = 0x78
+ RTM_NEWVLAN = 0x70
RTM_NR_FAMILIES = 0x1b
RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c0d45e320..75207613c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -304,6 +306,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c731d24f0..c68acda53 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -305,6 +307,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 680018a4a..a8c607ab8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -310,6 +312,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index a63909f30..18563dd8d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -109,6 +109,7 @@ const (
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
+ GCS_MAGIC = 0x47435300
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
@@ -119,6 +120,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -302,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9b0a2573f..22912cdaa 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -297,6 +299,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 958e6e064..29344eb37 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 50c7f25bd..20d51fb96 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ced21d66d..321b60902 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 226c04419..9bacdf1e2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 3122737cd..c22427261 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -358,6 +360,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index eb5d3467e..6270c8ee1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e921ebc60..9966c1941 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 38ba81c55..848e5fcc4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -294,6 +296,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 71f040097..669b2adb8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -366,6 +368,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c44a31332..4834e5751 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -119,6 +119,8 @@ const (
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -357,6 +359,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x38
SCM_TIMESTAMPING_PKTINFO = 0x3c
SCM_TIMESTAMPNS = 0x21
+ SCM_TS_OPT_ID = 0x5a
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 24b346e1a..813c05b66 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index ebd213100..fda328582 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 824b9c2d5..e6f58f3c6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4f178a229..7f8998b90 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
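The trampolines above back the new darwin vectored-I/O stubs (readv, preadv, writev, pwritev) in golang.org/x/sys/unix. A minimal sketch of a gathered write follows, under the assumption that the exported darwin wrappers mirror the long-standing Linux signatures (e.g. unix.Writev(fd int, iovs [][]byte) (n int, err error)); the lowercase functions in the generated files are internal stubs, not the public API.

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "writev-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Gather two separate buffers into a single vectored write.
	// Assumes unix.Writev(fd int, iovs [][]byte) (int, error) on darwin,
	// mirroring the Linux wrapper.
	bufs := [][]byte{[]byte("hello, "), []byte("vectored world\n")}
	n, err := unix.Writev(int(f.Fd()), bufs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes across %d iovecs", n, len(bufs))
}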
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 829b87feb..c6545413c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
+//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
+//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
+//go:linkname procgetpeerucred libc_getpeerucred
+//go:linkname procucred_get libc_ucred_get
+//go:linkname procucred_geteuid libc_ucred_geteuid
+//go:linkname procucred_getegid libc_ucred_getegid
+//go:linkname procucred_getruid libc_ucred_getruid
+//go:linkname procucred_getrgid libc_ucred_getrgid
+//go:linkname procucred_getsuid libc_ucred_getsuid
+//go:linkname procucred_getsgid libc_ucred_getsgid
+//go:linkname procucred_getpid libc_ucred_getpid
+//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate
@@ -420,6 +440,16 @@ var (
procgetpeername,
procsetsockopt,
procrecvfrom,
+ procgetpeerucred,
+ procucred_get,
+ procucred_geteuid,
+ procucred_getegid,
+ procucred_getruid,
+ procucred_getrgid,
+ procucred_getsuid,
+ procucred_getsgid,
+ procucred_getpid,
+ procucred_free,
procport_create,
procport_associate,
procport_dissociate,
@@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGet(pid int) (ucred uintptr, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+ ucred = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGeteuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetegid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetruid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetrgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetpid(ucred uintptr) (pid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredFree(ucred uintptr) {
+ sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func port_create() (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 524b0820c..c79aaff30 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -458,4 +458,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f485dbf45..5eb450695 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -381,4 +381,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 70b35bf3b..05e502974 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -422,4 +422,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1893e2fe8..38c53ec51 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -325,4 +325,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 16a4017da..31d2e71a1 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -321,4 +321,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1ef..f4184a336 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e5e..05b996227 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e60a..43a256e9e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6a0..eea5ddfc2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a7e..0d777bfbb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97d5..b44636502 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416da..0c7d21c18 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e766..840539169 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d86825b..fcf1b790d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e77c..52d15b5f9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148dc..a46abe647 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4747,7 +4747,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14c
+ NL80211_ATTR_MAX = 0x14d
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5519,7 +5519,7 @@ const (
NL80211_MNTR_FLAG_CONTROL = 0x3
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
NL80211_MNTR_FLAG_FCSFAIL = 0x1
- NL80211_MNTR_FLAG_MAX = 0x6
+ NL80211_MNTR_FLAG_MAX = 0x7
NL80211_MNTR_FLAG_OTHER_BSS = 0x4
NL80211_MNTR_FLAG_PLCPFAIL = 0x2
NL80211_MPATH_FLAG_ACTIVE = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
Family uint8
Protocol uint8
}
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 4e613cf63..3ca814f54 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -43,8 +43,8 @@ type DLL struct {
// LoadDLL loads DLL file into memory.
//
// Warning: using LoadDLL without an absolute path name is subject to
-// DLL preloading attacks. To safely load a system DLL, use LazyDLL
-// with System set to true, or use LoadLibraryEx directly.
+// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
+// or use [LoadLibraryEx] directly.
func LoadDLL(name string) (dll *DLL, err error) {
namep, err := UTF16PtrFromString(name)
if err != nil {
@@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
}
// NewLazyDLL creates new LazyDLL associated with DLL file.
+//
+// Warning: using NewLazyDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
func NewLazyDLL(name string) *LazyDLL {
return &LazyDLL{Name: name}
}
@@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
}
return &DLL{Name: name, Handle: h}, nil
}
-
-type errString string
-
-func (s errString) Error() string { return string(s) }
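The reworded warnings above steer callers toward NewLazySystemDLL, which restricts the lookup to the Windows system directory. A short sketch of that pattern; GetTickCount64 is just a convenient kernel32 export to demonstrate with.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// NewLazySystemDLL only loads from the system directory, avoiding the
	// DLL-preloading attacks that the LoadDLL/NewLazyDLL docs warn about.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")

	// Call returns raw uintptr results; for GetTickCount64 the first value is
	// the tick count, and the returned error only matters for APIs that
	// signal failure through the thread's last-error value.
	ticks, _, _ := getTickCount64.Call()
	fmt.Printf("system uptime: ~%d ms\n", ticks)
}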
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
index fd8632444..39aeeb644 100644
--- a/vendor/golang.org/x/sys/windows/registry/key.go
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -164,7 +164,12 @@ loopItems:
func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
var h syscall.Handle
var d uint32
- err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+ var pathPointer *uint16
+ pathPointer, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, false, err
+ }
+ err = regCreateKeyEx(syscall.Handle(k), pathPointer,
0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
if err != nil {
return 0, false, err
@@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool
// DeleteKey deletes the subkey path of key k and its values.
func DeleteKey(k Key, path string) error {
- return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+ pathPointer, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return regDeleteKey(syscall.Handle(k), pathPointer)
}
// A KeyInfo describes the statistics of a key. It is returned by Stat.
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
index 74db26b94..a1bcbb236 100644
--- a/vendor/golang.org/x/sys/windows/registry/value.go
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error {
// DeleteValue removes a named value from the key k.
func (k Key) DeleteValue(name string) error {
- return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+ namePointer, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ return regDeleteValue(syscall.Handle(k), namePointer)
}
// ReadValueNames returns the value names of key k.
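The registry changes above replace StringToUTF16Ptr, which panics on an interior NUL byte, with UTF16PtrFromString, so malformed paths and value names now surface as ordinary errors. A sketch of the caller-visible effect, assuming the usual registry.CURRENT_USER workflow; the "Software\x00Evil" path and the NPDDemo key name are deliberately invented for illustration.

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// A path with an embedded NUL used to panic inside StringToUTF16Ptr;
	// with UTF16PtrFromString it comes back as a normal error instead.
	if _, _, err := registry.CreateKey(registry.CURRENT_USER, "Software\x00Evil", registry.ALL_ACCESS); err != nil {
		fmt.Println("invalid path rejected:", err)
	}

	// The ordinary flow is unchanged: create (or open) a key, set a value,
	// then clean up.
	k, _, err := registry.CreateKey(registry.CURRENT_USER, `Software\NPDDemo`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()
	if err := k.SetStringValue("greeting", "hello"); err != nil {
		log.Fatal(err)
	}
	if err := k.DeleteValue("greeting"); err != nil {
		log.Fatal(err)
	}
	if err := registry.DeleteKey(registry.CURRENT_USER, `Software\NPDDemo`); err != nil {
		log.Fatal(err)
	}
}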
diff --git a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go
index f279444d9..ad40c2f48 100644
--- a/vendor/golang.org/x/sys/windows/svc/eventlog/log.go
+++ b/vendor/golang.org/x/sys/windows/svc/eventlog/log.go
@@ -29,11 +29,19 @@ func OpenRemote(host, source string) (*Log, error) {
if source == "" {
return nil, errors.New("Specify event log source")
}
- var s *uint16
+ var hostPointer *uint16
if host != "" {
- s = syscall.StringToUTF16Ptr(host)
+ var err error
+ hostPointer, err = syscall.UTF16PtrFromString(host)
+ if err != nil {
+ return nil, err
+ }
}
- h, err := windows.RegisterEventSource(s, syscall.StringToUTF16Ptr(source))
+ sourcePointer, err := syscall.UTF16PtrFromString(source)
+ if err != nil {
+ return nil, err
+ }
+ h, err := windows.RegisterEventSource(hostPointer, sourcePointer)
if err != nil {
return nil, err
}
@@ -46,7 +54,11 @@ func (l *Log) Close() error {
}
func (l *Log) report(etype uint16, eid uint32, msg string) error {
- ss := []*uint16{syscall.StringToUTF16Ptr(msg)}
+ msgPointer, err := syscall.UTF16PtrFromString(msg)
+ if err != nil {
+ return err
+ }
+ ss := []*uint16{msgPointer}
return windows.ReportEvent(l.Handle, etype, 0, eid, 0, 1, 0, &ss[0], nil)
}
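OpenRemote and report now validate their strings with UTF16PtrFromString before handing them to the Windows APIs, so an invalid host, source, or message is reported as an error rather than a panic. A minimal local-log sketch, assuming an already-registered event source; "MyApp" is a placeholder name.

//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/svc/eventlog"
)

func main() {
	// Open is OpenRemote with an empty host, i.e. the local event log.
	// The source normally has to be registered first, for example with
	// eventlog.InstallAsEventCreate("MyApp", eventlog.Info|eventlog.Warning|eventlog.Error).
	elog, err := eventlog.Open("MyApp")
	if err != nil {
		log.Fatal(err)
	}
	defer elog.Close()

	// Info/Warning/Error all funnel through report(), so a message containing
	// an interior NUL now returns an error instead of panicking.
	if err := elog.Info(1, "demo event written via the eventlog package"); err != nil {
		log.Fatal(err)
	}
}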
diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go
index c4f74924d..a9b1c192d 100644
--- a/vendor/golang.org/x/sys/windows/svc/service.go
+++ b/vendor/golang.org/x/sys/windows/svc/service.go
@@ -132,10 +132,10 @@ type ctlEvent struct {
// service provides access to windows service api.
type service struct {
- name string
- h windows.Handle
- c chan ctlEvent
- handler Handler
+ namePointer *uint16
+ h windows.Handle
+ c chan ctlEvent
+ handler Handler
}
type exitCode struct {
@@ -209,7 +209,7 @@ var theService service // This is, unfortunately, a global, which means only one
// serviceMain is the entry point called by the service manager, registered earlier by
// the call to StartServiceCtrlDispatcher.
func serviceMain(argc uint32, argv **uint16) uintptr {
- handle, err := windows.RegisterServiceCtrlHandlerEx(windows.StringToUTF16Ptr(theService.name), ctlHandlerCallback, 0)
+ handle, err := windows.RegisterServiceCtrlHandlerEx(theService.namePointer, ctlHandlerCallback, 0)
if sysErr, ok := err.(windows.Errno); ok {
return uintptr(sysErr)
} else if err != nil {
@@ -280,15 +280,21 @@ loop:
// Run executes service name by calling appropriate handler function.
func Run(name string, handler Handler) error {
+ // Check to make sure that the service name is valid.
+ namePointer, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+
initCallbacks.Do(func() {
ctlHandlerCallback = windows.NewCallback(ctlHandler)
serviceMainCallback = windows.NewCallback(serviceMain)
})
- theService.name = name
+ theService.namePointer = namePointer
theService.handler = handler
theService.c = make(chan ctlEvent)
t := []windows.SERVICE_TABLE_ENTRY{
- {ServiceName: windows.StringToUTF16Ptr(theService.name), ServiceProc: serviceMainCallback},
+ {ServiceName: namePointer, ServiceProc: serviceMainCallback},
{ServiceName: nil, ServiceProc: 0},
}
return windows.StartServiceCtrlDispatcher(&t[0])
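Run now converts the service name to UTF-16 once, up front, so an invalid name fails before the control dispatcher starts, and the service struct carries the pointer rather than the string. A minimal handler sketch for context, using the conventional Execute loop; "npd-demo" is a placeholder service name.

//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

type demoService struct{}

// Execute is the standard svc.Handler loop: report StartPending, then Running,
// and shut down cleanly when the service manager sends Stop or Shutdown.
func (demoService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
	const accepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	changes <- svc.Status{State: svc.Running, Accepts: accepted}
	for c := range r {
		switch c.Cmd {
		case svc.Interrogate:
			changes <- c.CurrentStatus
		case svc.Stop, svc.Shutdown:
			changes <- svc.Status{State: svc.StopPending}
			return false, 0
		}
	}
	return false, 0
}

func main() {
	// An invalid service name (for example one containing a NUL byte) is now
	// rejected here, before StartServiceCtrlDispatcher is ever called.
	if err := svc.Run("npd-demo", demoService{}); err != nil {
		log.Fatal(err)
	}
}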
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 9d138de5f..ad67df2fd 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1074,6 +1074,7 @@ const (
IP_ADD_MEMBERSHIP = 0xc
IP_DROP_MEMBERSHIP = 0xd
IP_PKTINFO = 0x13
+ IP_MTU_DISCOVER = 0x47
IPV6_V6ONLY = 0x1b
IPV6_UNICAST_HOPS = 0x4
@@ -1083,6 +1084,7 @@ const (
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_PKTINFO = 0x13
+ IPV6_MTU_DISCOVER = 0x47
MSG_OOB = 0x1
MSG_PEEK = 0x2
@@ -1132,6 +1134,15 @@ const (
WSASYS_STATUS_LEN = 128
)
+// enum PMTUD_STATE from ws2ipdef.h
+const (
+ IP_PMTUDISC_NOT_SET = 0
+ IP_PMTUDISC_DO = 1
+ IP_PMTUDISC_DONT = 2
+ IP_PMTUDISC_PROBE = 3
+ IP_PMTUDISC_MAX = 4
+)
+
type WSABuf struct {
Len uint32
Buf *byte
@@ -1146,6 +1157,22 @@ type WSAMsg struct {
Flags uint32
}
+type WSACMSGHDR struct {
+ Len uintptr
+ Level int32
+ Type int32
+}
+
+type IN_PKTINFO struct {
+ Addr [4]byte
+ Ifindex uint32
+}
+
+type IN6_PKTINFO struct {
+ Addr [16]byte
+ Ifindex uint32
+}
+
// Flags for WSASocket
const (
WSA_FLAG_OVERLAPPED = 0x01
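The new IP_MTU_DISCOVER / IPV6_MTU_DISCOVER options and the PMTUD_STATE values let Windows sockets opt into (or out of) path-MTU discovery, mirroring ws2ipdef.h. A sketch of applying IP_PMTUDISC_DO to a UDP socket through the net package's RawConn, assuming a plain IPv4 listener.

//go:build windows

package main

import (
	"log"
	"net"

	"golang.org/x/sys/windows"
)

func main() {
	conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	rc, err := conn.SyscallConn()
	if err != nil {
		log.Fatal(err)
	}

	// IP_PMTUDISC_DO sets the DF bit and makes sends larger than the path MTU
	// fail instead of fragmenting; IP_PMTUDISC_DONT and _PROBE are the other states.
	var sockErr error
	if err := rc.Control(func(fd uintptr) {
		sockErr = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IP,
			windows.IP_MTU_DISCOVER, windows.IP_PMTUDISC_DO)
	}); err != nil {
		log.Fatal(err)
	}
	if sockErr != nil {
		log.Fatal(sockErr)
	}
	log.Println("path MTU discovery enabled on", conn.LocalAddr())
}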
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f493..794b2e32b 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int {
// TokensAt returns the number of tokens available at time t.
func (lim *Limiter) TokensAt(t time.Time) float64 {
lim.mu.Lock()
- _, tokens := lim.advance(t) // does not mutate lim
+ tokens := lim.advance(t) // does not mutate lim
lim.mu.Unlock()
return tokens
}
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -185,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) {
return
}
// advance time to now
- t, tokens := r.lim.advance(t)
+ tokens := r.lim.advance(t)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
@@ -306,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -323,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -344,21 +345,9 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
@@ -391,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
return r
}
-// advance calculates and returns an updated state for lim resulting from the passage of time.
+// advance calculates and returns an updated number of tokens for lim
+// resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
+func (lim *Limiter) advance(t time.Time) (newTokens float64) {
last := lim.last
if t.Before(last) {
last = t
@@ -407,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
- return t, tokens
+ return tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
@@ -416,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
- seconds := tokens / float64(limit)
- return time.Duration(float64(time.Second) * seconds)
+
+ duration := (tokens / float64(limit)) * float64(time.Second)
+
+ // Cap the duration to the maximum representable int64 value, to avoid overflow.
+ if duration > float64(math.MaxInt64) {
+ return InfDuration
+ }
+
+ return time.Duration(duration)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
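The rate changes above fold advance down to returning only the token count, guard durationFromTokens against int64 overflow, and initialize a new limiter's bucket to its burst size at construction. A short sketch of the resulting behavior using only the public API: a fresh limiter can absorb a full burst immediately, and Wait blocks for the next token.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 5 events/sec with a burst of 10. The burst is available immediately on a
	// fresh limiter, so the first 10 Allow() calls succeed without waiting.
	lim := rate.NewLimiter(rate.Limit(5), 10)

	allowed := 0
	for i := 0; i < 12; i++ {
		if lim.Allow() {
			allowed++
		}
	}
	fmt.Printf("immediately allowed %d of 12 (tokens left: %.1f)\n", allowed, lim.Tokens())

	// Wait blocks until a token is available or the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println("got one more token after waiting")
}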
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
index b5e38c662..f8a85d5a4 100644
--- a/vendor/google.golang.org/api/googleapi/googleapi.go
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -145,22 +145,54 @@ func CheckResponse(res *http.Response) error {
}
slurp, err := io.ReadAll(res.Body)
if err == nil {
- jerr := new(errorReply)
- err = json.Unmarshal(slurp, jerr)
- if err == nil && jerr.Error != nil {
- if jerr.Error.Code == 0 {
- jerr.Error.Code = res.StatusCode
- }
- jerr.Error.Body = string(slurp)
- jerr.Error.Header = res.Header
- return jerr.Error
- }
+ return CheckResponseWithBody(res, slurp)
}
return &Error{
Code: res.StatusCode,
Body: string(slurp),
Header: res.Header,
}
+
+}
+
+// CheckResponseWithBody returns an error (of type *Error) if the response
+// status code is not 2xx. Distinct from CheckResponse to allow for checking
+// a previously-read body to maintain error detail content.
+func CheckResponseWithBody(res *http.Response, body []byte) error {
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ return nil
+ }
+
+ jerr, err := errorReplyFromBody(body)
+ if err == nil && jerr.Error != nil {
+ if jerr.Error.Code == 0 {
+ jerr.Error.Code = res.StatusCode
+ }
+ jerr.Error.Body = string(body)
+ jerr.Error.Header = res.Header
+ return jerr.Error
+ }
+
+ return &Error{
+ Code: res.StatusCode,
+ Body: string(body),
+ Header: res.Header,
+ }
+}
+
+// errorReplyFromBody attempts to get the error from body. The body
+// may be a JSON object or JSON array, or may be something else.
+func errorReplyFromBody(body []byte) (*errorReply, error) {
+ jerr := new(errorReply)
+ if len(body) > 0 && body[0] == '[' {
+ // Attempt JSON array
+ jsonArr := []*errorReply{jerr}
+ err := json.Unmarshal(body, &jsonArr)
+ return jerr, err
+ }
+ // Attempt JSON object
+ err := json.Unmarshal(body, jerr)
+ return jerr, err
}
// IsNotModified reports whether err is the result of the
@@ -200,7 +232,17 @@ var WithDataWrapper = MarshalStyle(true)
// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
var WithoutDataWrapper = MarshalStyle(false)
+// JSONReader is like JSONBuffer, but returns an io.Reader instead.
func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+ buf, err := wrap.JSONBuffer(v)
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+// JSONBuffer encodes the body and wraps it if needed.
+func (wrap MarshalStyle) JSONBuffer(v interface{}) (*bytes.Buffer, error) {
buf := new(bytes.Buffer)
if wrap {
buf.Write([]byte(`{"data": `))
@@ -259,6 +301,20 @@ func ChunkSize(size int) MediaOption {
return chunkSizeOption(size)
}
+type chunkTransferTimeoutOption time.Duration
+
+func (cd chunkTransferTimeoutOption) setOptions(o *MediaOptions) {
+ o.ChunkTransferTimeout = time.Duration(cd)
+}
+
+// ChunkTransferTimeout returns a MediaOption which sets a per-chunk
+// transfer timeout for resumable uploads. If a single chunk has been
+// attempting to upload for longer than this time, the in-flight request is canceled and retried.
+// The default is no timeout for the request.
+func ChunkTransferTimeout(timeout time.Duration) MediaOption {
+ return chunkTransferTimeoutOption(timeout)
+}
+
type chunkRetryDeadlineOption time.Duration
func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) {
@@ -283,6 +339,7 @@ type MediaOptions struct {
ForceEmptyContentType bool
ChunkSize int
ChunkRetryDeadline time.Duration
+ ChunkTransferTimeout time.Duration
}
// ProcessMediaOptions stores options from opts in a MediaOptions.
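CheckResponseWithBody factors the error decoding out of CheckResponse so callers that have already drained the response body can still get a structured *googleapi.Error, including the JSON-array error shape handled by errorReplyFromBody. A hedged sketch against an arbitrary REST endpoint; the URL is only a placeholder.

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"

	"google.golang.org/api/googleapi"
)

func main() {
	// Placeholder URL: any REST endpoint that returns a JSON error payload.
	res, err := http.Get("https://www.googleapis.com/storage/v1/b/some-bucket-you-cannot-read")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// Read the body once (say, for logging), then still recover a structured
	// *googleapi.Error instead of re-checking an already-drained body.
	body, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}

	if err := googleapi.CheckResponseWithBody(res, body); err != nil {
		var apiErr *googleapi.Error
		if errors.As(err, &apiErr) {
			fmt.Printf("API error %d: %s\n", apiErr.Code, apiErr.Message)
			return
		}
		log.Fatal(err)
	}
	fmt.Println("success:", res.Status)
}

The new ChunkTransferTimeout option follows the same MediaOption pattern as ChunkSize and ChunkRetryDeadline: it is passed alongside them when configuring a resumable upload, and a chunk that stalls past the timeout is canceled and retried.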
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
index 4ebeb61c1..86861e243 100644
--- a/vendor/google.golang.org/api/internal/creds.go
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -15,6 +15,7 @@ import (
"os"
"time"
+ "cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/oauth2adapt"
"golang.org/x/oauth2"
@@ -30,7 +31,7 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
// it returns default credential information.
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
if ds.IsNewAuthLibraryEnabled() {
- return credsNewAuth(ctx, ds)
+ return credsNewAuth(ds)
}
creds, err := baseCreds(ctx, ds)
if err != nil {
@@ -42,6 +43,30 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
return creds, nil
}
+// AuthCreds returns [cloud.google.com/go/auth.Credentials] based on credentials
+// options provided via [option.ClientOption], including legacy oauth2/google
+// options. If there are no applicable options, then it returns the result of
+// [cloud.google.com/go/auth/credentials.DetectDefault].
+func AuthCreds(ctx context.Context, settings *DialSettings) (*auth.Credentials, error) {
+ if settings.AuthCredentials != nil {
+ return settings.AuthCredentials, nil
+ }
+ // Support oauth2/google options
+ var oauth2Creds *google.Credentials
+ if settings.InternalCredentials != nil {
+ oauth2Creds = settings.InternalCredentials
+ } else if settings.Credentials != nil {
+ oauth2Creds = settings.Credentials
+ } else if settings.TokenSource != nil {
+ oauth2Creds = &google.Credentials{TokenSource: settings.TokenSource}
+ }
+ if oauth2Creds != nil {
+ return oauth2adapt.AuthCredentialsFromOauth2Credentials(oauth2Creds), nil
+ }
+
+ return detectDefaultFromDialSettings(settings)
+}
+
// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport.
// The OAuth2 transport and endpoint will be configured for mTLS if applicable.
func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) {
@@ -62,7 +87,7 @@ func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string
return tokenURL, oauth2Client, nil
}
-func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) {
+func credsNewAuth(settings *DialSettings) (*google.Credentials, error) {
// Preserve old options behavior
if settings.InternalCredentials != nil {
return settings.InternalCredentials, nil
@@ -76,6 +101,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti
return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil
}
+ creds, err := detectDefaultFromDialSettings(settings)
+ if err != nil {
+ return nil, err
+ }
+ return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil
+}
+
+func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, error) {
var useSelfSignedJWT bool
var aud string
var scopes []string
@@ -100,18 +133,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti
aud = settings.DefaultAudience
}
- creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+ return credentials.DetectDefault(&credentials.DetectOptions{
Scopes: scopes,
Audience: aud,
CredentialsFile: settings.CredentialsFile,
CredentialsJSON: settings.CredentialsJSON,
UseSelfSignedJWT: useSelfSignedJWT,
+ Logger: settings.Logger,
})
- if err != nil {
- return nil, err
- }
-
- return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil
}
func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 32949cccb..beec4ea0d 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -8,6 +8,7 @@ package internal
import (
"crypto/tls"
"errors"
+ "log/slog"
"net/http"
"os"
"strconv"
@@ -62,6 +63,8 @@ type DialSettings struct {
AllowNonDefaultServiceAccount bool
DefaultUniverseDomain string
UniverseDomain string
+ AllowHardBoundTokens []string
+ Logger *slog.Logger
// Google API system parameters. For more information please read:
// https://cloud.google.com/apis/docs/system-parameters
QuotaProject string
@@ -70,6 +73,9 @@ type DialSettings struct {
// New Auth library Options
AuthCredentials *auth.Credentials
EnableNewAuthLibrary bool
+
+ // TODO(b/372244283): Remove after b/358175516 has been fixed
+ EnableAsyncRefreshDryRun func()
}
// GetScopes returns the user-provided scopes, if set, or else falls back to the
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 48b023ade..618a9fd35 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.192.0"
+const Version = "0.228.0"
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index e6b5c1025..18fec9c98 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -6,6 +6,11 @@
package internaloption
import (
+ "context"
+ "log/slog"
+
+ "cloud.google.com/go/auth"
+ "github.com/googleapis/gax-go/v2/internallog"
"golang.org/x/oauth2/google"
"google.golang.org/api/internal"
"google.golang.org/api/option"
@@ -181,6 +186,33 @@ func (w enableJwtWithScope) Apply(o *internal.DialSettings) {
o.EnableJwtWithScope = bool(w)
}
+// AllowHardBoundTokens returns a ClientOption that allows libraries to request a hard-bound token.
+// Obtaining hard-bound tokens requires the connection to be established using either Application
+// Layer Transport Security (ALTS) or mutual TLS (mTLS) with S2A. For more information on ALTS,
+// see: https://cloud.google.com/docs/security/encryption-in-transit/application-layer-transport-security
+//
+// The AllowHardBoundTokens option accepts the following values (or a combination thereof):
+//
+// - "MTLS_S2A": Allows obtaining hard-bound tokens when the connection uses mutual TLS with S2A.
+// - "ALTS": Allows obtaining hard-bound tokens when the connection uses ALTS.
+//
+// For example, to allow obtaining hard-bound tokens with either MTLS_S2A or ALTS, you would
+// provide both values (e.g., {"MTLS_S2A","ALTS"}). If no value is provided, hard-bound tokens
+// will not be requested.
+//
+// It should only be used internally by generated clients.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func AllowHardBoundTokens(protocol ...string) option.ClientOption {
+ return allowHardBoundTokens(protocol)
+}
+
+type allowHardBoundTokens []string
+
+func (a allowHardBoundTokens) Apply(o *internal.DialSettings) {
+ o.AllowHardBoundTokens = make([]string, len(a))
+ copy(o.AllowHardBoundTokens, a)
+}
+
// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls.
// This credential takes precedence over all other credential options.
func WithCredentials(creds *google.Credentials) option.ClientOption {
@@ -206,9 +238,79 @@ func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) {
o.EnableNewAuthLibrary = bool(w)
}
+// EnableAsyncRefreshDryRun returns a ClientOption that specifies if libraries in this
+// module should asynchronously refresh the auth token in parallel to the sync refresh.
+//
+// This option can be used to determine whether refreshing the token asynchronously
+// prior to its actual expiry works without any issues in a particular environment.
+//
+// errHandler function will be called when there is an error while refreshing
+// the token asynchronously.
+//
+// This is an EXPERIMENTAL option and will be removed in the future.
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+func EnableAsyncRefreshDryRun(errHandler func()) option.ClientOption {
+ return enableAsyncRefreshDryRun{
+ errHandler: errHandler,
+ }
+}
+
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+type enableAsyncRefreshDryRun struct {
+ errHandler func()
+}
+
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+func (w enableAsyncRefreshDryRun) Apply(o *internal.DialSettings) {
+ o.EnableAsyncRefreshDryRun = w.errHandler
+}
+
// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to
// create their own client options by embedding this type into their own
// client-specific option wrapper. See example for usage.
type EmbeddableAdapter struct{}
func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {}
+
+// GetLogger is a helper for client libraries to extract the [slog.Logger] from
+// the provided options or return a default logger if one is not found.
+//
+// It should only be used internally by generated clients. This is an EXPERIMENTAL API
+// and may be changed or removed in the future.
+func GetLogger(opts []option.ClientOption) *slog.Logger {
+ var ds internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&ds)
+ }
+ return internallog.New(ds.Logger)
+}
+
+// AuthCreds returns [cloud.google.com/go/auth.Credentials] using the following
+// options provided via [option.ClientOption], including legacy oauth2/google
+// options, in this order:
+//
+// * [option.WithAuthCredentials]
+// * [option/internaloption.WithCredentials] (internal use only)
+// * [option.WithCredentials]
+// * [option.WithTokenSource]
+//
+// If there are no applicable credentials options, then it passes the
+// following options to [cloud.google.com/go/auth/credentials.DetectDefault] and
+// returns the result:
+//
+// * [option.WithAudiences]
+// * [option.WithCredentialsFile]
+// * [option.WithCredentialsJSON]
+// * [option.WithScopes]
+// * [option/internaloption.WithDefaultScopes] (internal use only)
+// * [option/internaloption.EnableJwtWithScope] (internal use only)
+//
+// This function should only be used internally by generated clients. This is an
+// EXPERIMENTAL API and may be changed or removed in the future.
+func AuthCreds(ctx context.Context, opts []option.ClientOption) (*auth.Credentials, error) {
+ var ds internal.DialSettings
+ for _, opt := range opts {
+ opt.Apply(&ds)
+ }
+ return internal.AuthCreds(ctx, &ds)
+}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index 23aba01b6..1b134caa8 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -7,6 +7,7 @@ package option
import (
"crypto/tls"
+ "log/slog"
"net/http"
"cloud.google.com/go/auth"
@@ -43,6 +44,14 @@ func (w withCredFile) Apply(o *internal.DialSettings) {
// WithCredentialsFile returns a ClientOption that authenticates
// API calls with the given service account or refresh token JSON
// credentials file.
+//
+// Important: If you accept a credential configuration (credential
+// JSON/File/Stream) from an external source for authentication to Google
+// Cloud Platform, you must validate it before providing it to any Google
+// API or library. Providing an unvalidated credential configuration to
+// Google APIs can compromise the security of your systems and data. For
+// more information, refer to [Validate credential configurations from
+// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func WithCredentialsFile(filename string) ClientOption {
return withCredFile(filename)
}
@@ -50,6 +59,14 @@ func WithCredentialsFile(filename string) ClientOption {
// WithServiceAccountFile returns a ClientOption that uses a Google service
// account credentials file to authenticate.
//
+// Important: If you accept a credential configuration (credential
+// JSON/File/Stream) from an external source for authentication to Google
+// Cloud Platform, you must validate it before providing it to any Google
+// API or library. Providing an unvalidated credential configuration to
+// Google APIs can compromise the security of your systems and data. For
+// more information, refer to [Validate credential configurations from
+// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
+//
// Deprecated: Use WithCredentialsFile instead.
func WithServiceAccountFile(filename string) ClientOption {
return WithCredentialsFile(filename)
@@ -58,6 +75,14 @@ func WithServiceAccountFile(filename string) ClientOption {
// WithCredentialsJSON returns a ClientOption that authenticates
// API calls with the given service account or refresh token JSON
// credentials.
+//
+// Important: If you accept a credential configuration (credential
+// JSON/File/Stream) from an external source for authentication to Google
+// Cloud Platform, you must validate it before providing it to any Google
+// API or library. Providing an unvalidated credential configuration to
+// Google APIs can compromise the security of your systems and data. For
+// more information, refer to [Validate credential configurations from
+// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
func WithCredentialsJSON(p []byte) ClientOption {
return withCredentialsJSON(p)
}
@@ -70,7 +95,14 @@ func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
}
// WithEndpoint returns a ClientOption that overrides the default endpoint
-// to be used for a service.
+// to be used for a service. Please note that by default Google APIs only
+// accept HTTPS traffic.
+//
+// For a gRPC client, the port number is typically included in the endpoint.
+// Example: "us-central1-speech.googleapis.com:443".
+//
+// For a REST client, the port number is typically not included. Example:
+// "https://speech.googleapis.com".
func WithEndpoint(url string) ClientOption {
return withEndpoint(url)
}
@@ -359,8 +391,6 @@ func (w withAuthCredentials) Apply(o *internal.DialSettings) {
}
// WithUniverseDomain returns a ClientOption that sets the universe domain.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithUniverseDomain(ud string) ClientOption {
return withUniverseDomain(ud)
}
@@ -370,3 +400,17 @@ type withUniverseDomain string
func (w withUniverseDomain) Apply(o *internal.DialSettings) {
o.UniverseDomain = string(w)
}
+
+// WithLogger returns a ClientOption that sets the logger used throughout the
+// client library call stack. If this option is provided it takes precedence
+// over the value set in GOOGLE_SDK_GO_LOGGING_LEVEL. Specifying this option
+// enables logging at the provided logger's configured level.
+func WithLogger(l *slog.Logger) ClientOption {
+ return withLogger{l}
+}
+
+type withLogger struct{ l *slog.Logger }
+
+func (w withLogger) Apply(o *internal.DialSettings) {
+ o.Logger = w.l
+}
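WithLogger plugs a *slog.Logger into the dial settings that creds.go and the transports forward to the new auth stack. A sketch of wiring it into an HTTP client via google.golang.org/api/transport/http, whose NewClient(ctx, opts...) is part of this module; the scope and logging handler are illustrative choices, and without ambient credentials the call will fail at ADC detection.

package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()

	// Route the client stack's logging to JSON on stderr at debug level.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	client, endpoint, err := htransport.NewClient(ctx,
		option.WithLogger(logger),
		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.CloseIdleConnections()
	log.Printf("authenticated client ready (default endpoint: %q)", endpoint)
}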
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index d2a4f7664..a6630a0e4 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -22,7 +22,6 @@ import (
"cloud.google.com/go/auth/grpctransport"
"cloud.google.com/go/auth/oauth2adapt"
"cloud.google.com/go/compute/metadata"
- "go.opencensus.io/plugin/ocgrpc"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/oauth2"
"golang.org/x/time/rate"
@@ -229,24 +228,29 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna
GRPCDialOpts: prepareDialOptsNewAuth(ds),
PoolSize: poolSize,
Credentials: creds,
+ ClientCertProvider: ds.ClientCertSource,
APIKey: ds.APIKey,
DetectOpts: &credentials.DetectOptions{
Scopes: ds.Scopes,
Audience: aud,
CredentialsFile: ds.CredentialsFile,
CredentialsJSON: ds.CredentialsJSON,
+ Logger: ds.Logger,
},
InternalOptions: &grpctransport.InternalOptions{
EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount,
EnableDirectPath: ds.EnableDirectPath,
EnableDirectPathXds: ds.EnableDirectPathXds,
EnableJWTWithScope: ds.EnableJwtWithScope,
+ AllowHardBoundTokens: ds.AllowHardBoundTokens,
DefaultAudience: ds.DefaultAudience,
DefaultEndpointTemplate: defaultEndpointTemplate,
DefaultMTLSEndpoint: ds.DefaultMTLSEndpoint,
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
+ UniverseDomain: ds.UniverseDomain,
+ Logger: ds.Logger,
})
return pool, err
}
@@ -260,6 +264,40 @@ func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption {
return append(opts, ds.GRPCDialOpts...)
}
+// dryRunAsync is a wrapper for oauth2.TokenSource that performs a sync refresh
+// after an async refresh. Token generated by async refresh is not used.
+//
+// This is an EXPERIMENTAL feature and may be removed or changed in the future.
+// It is a temporary struct to determine if the async refresh
+// is working properly.
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+type dryRunAsync struct {
+ asyncTokenSource oauth2.TokenSource
+ syncTokenSource oauth2.TokenSource
+ errHandler func()
+}
+
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+func newDryRunAsync(ts oauth2.TokenSource, errHandler func()) dryRunAsync {
+ tp := auth.NewCachedTokenProvider(oauth2adapt.TokenProviderFromTokenSource(ts), nil)
+ asyncTs := oauth2adapt.TokenSourceFromTokenProvider(tp)
+ return dryRunAsync{
+ syncTokenSource: ts,
+ asyncTokenSource: asyncTs,
+ errHandler: errHandler,
+ }
+}
+
+// Token returns a token or an error.
+// TODO(b/372244283): Remove after b/358175516 has been fixed
+func (async dryRunAsync) Token() (*oauth2.Token, error) {
+ _, err := async.asyncTokenSource.Token()
+ if err != nil {
+ async.errHandler()
+ }
+ return async.syncTokenSource.Token()
+}
+
func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) {
if o.HTTPClient != nil {
return nil, errors.New("unsupported HTTP client specified")
@@ -296,8 +334,14 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
if err != nil {
return nil, err
}
+
+ ts := creds.TokenSource
+ // TODO(b/372244283): Remove after b/358175516 has been fixed
+ if o.EnableAsyncRefreshDryRun != nil {
+ ts = newDryRunAsync(ts, o.EnableAsyncRefreshDryRun)
+ }
grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{
- TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource},
+ TokenSource: oauth.TokenSource{TokenSource: ts},
quotaProject: internal.GetQuotaProject(creds, o.QuotaProject),
requestReason: o.RequestReason,
}))
@@ -343,7 +387,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
// Add tracing, but before the other options, so that clients can override the
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
- grpcOpts = addOCStatsHandler(grpcOpts, o)
grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o)
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
@@ -353,13 +396,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
return dialContext(ctx, endpoint, grpcOpts...)
}
-func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
- if settings.TelemetryDisabled {
- return opts
- }
- return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
-}
-
func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
if settings.TelemetryDisabled {
return opts
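The hunks above drop the OpenCensus gRPC stats handler in favour of the OpenTelemetry one and, behind the experimental EnableAsyncRefreshDryRun option, route per-RPC credentials through the dryRunAsync wrapper. The standalone sketch below is not part of the diff (fakeSource, dryRun and the package layout are illustrative); it only demonstrates the property the wrapper guarantees: the async source is consulted purely to observe failures, and callers always receive the sync source's token.

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

// fakeSource is a stand-in oauth2.TokenSource that can be made to fail on demand.
type fakeSource struct {
	tok *oauth2.Token
	err error
}

func (f fakeSource) Token() (*oauth2.Token, error) { return f.tok, f.err }

// dryRun mirrors the dryRunAsync wrapper above: the async source is queried
// only to detect failures, and the returned token always comes from sync.
type dryRun struct {
	async, sync oauth2.TokenSource
	onAsyncErr  func()
}

func (d dryRun) Token() (*oauth2.Token, error) {
	if _, err := d.async.Token(); err != nil {
		d.onAsyncErr()
	}
	return d.sync.Token()
}

func main() {
	ts := dryRun{
		sync:       fakeSource{tok: &oauth2.Token{AccessToken: "sync-token"}},
		async:      fakeSource{err: errors.New("async refresh failed")},
		onAsyncErr: func() { fmt.Println("async refresh error observed") },
	}
	tok, err := ts.Token()
	fmt.Println(tok.AccessToken, err) // prints "sync-token <nil>" despite the async failure
}
```

Run as-is, the program still hands out the sync token even though the async path errors, which is exactly the dry-run behaviour the TODO in the diff is validating.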
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index 2e2b15c6e..a33df9120 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -19,7 +19,6 @@ import (
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/httptransport"
"cloud.google.com/go/auth/oauth2adapt"
- "go.opencensus.io/plugin/ochttp"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
@@ -27,7 +26,6 @@ import (
"google.golang.org/api/internal"
"google.golang.org/api/internal/cert"
"google.golang.org/api/option"
- "google.golang.org/api/transport/http/internal/propagation"
)
// NewClient returns an HTTP client for use communicating with a Google cloud
@@ -107,6 +105,9 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.
if ds.RequestReason != "" {
headers.Set("X-goog-request-reason", ds.RequestReason)
}
+ if ds.UserAgent != "" {
+ headers.Set("User-Agent", ds.UserAgent)
+ }
client, err := httptransport.NewClient(&httptransport.Options{
DisableTelemetry: ds.TelemetryDisabled,
DisableAuthentication: ds.NoAuth,
@@ -121,6 +122,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.
Audience: aud,
CredentialsFile: ds.CredentialsFile,
CredentialsJSON: ds.CredentialsJSON,
+ Logger: ds.Logger,
},
InternalOptions: &httptransport.InternalOptions{
EnableJWTWithScope: ds.EnableJwtWithScope,
@@ -130,6 +132,8 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.
DefaultScopes: ds.DefaultScopes,
SkipValidation: skipValidation,
},
+ UniverseDomain: ds.UniverseDomain,
+ Logger: ds.Logger,
})
if err != nil {
return nil, err
@@ -164,10 +168,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
requestReason: settings.RequestReason,
}
var trans http.RoundTripper = paramTransport
- // Give OpenTelemetry precedence over OpenCensus in case user configuration
- // causes both to write the same header (`X-Cloud-Trace-Context`).
trans = addOpenTelemetryTransport(trans, settings)
- trans = addOCTransport(trans, settings)
switch {
case settings.NoAuth:
// Do nothing.
@@ -308,16 +309,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS
return otelhttp.NewTransport(trans)
}
-func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
- if settings.TelemetryDisabled {
- return trans
- }
- return &ochttp.Transport{
- Base: trans,
- Propagation: &propagation.HTTPFormat{},
- }
-}
-
// clonedTransport returns the given RoundTripper as a cloned *http.Transport.
// It returns nil if the RoundTripper can't be cloned or coerced to
// *http.Transport.
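With addOCTransport and the ochttp wrapper removed, the HTTP path is instrumented only by otelhttp. A rough equivalent of the remaining telemetry wiring is sketched below, assuming nothing beyond the otelhttp package already imported above; newInstrumentedClient and telemetryDisabled are illustrative names, not part of this change.

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newInstrumentedClient mirrors what newTransport now does for telemetry:
// wrap the base RoundTripper with otelhttp unless telemetry is disabled.
func newInstrumentedClient(base http.RoundTripper, telemetryDisabled bool) *http.Client {
	if base == nil {
		base = http.DefaultTransport
	}
	rt := base
	if !telemetryDisabled {
		rt = otelhttp.NewTransport(rt)
	}
	return &http.Client{Transport: rt}
}

func main() {
	client := newInstrumentedClient(nil, false)
	_ = client // use client.Get(...) as usual; spans go to the globally configured tracer provider
}
```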
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
deleted file mode 100644
index ba7512aa2..000000000
--- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.8
-// +build go1.8
-
-// Package propagation implements X-Cloud-Trace-Context header propagation used
-// by Google Cloud products.
-package propagation
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- httpHeader = `X-Cloud-Trace-Context`
-)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(httpHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(httpHeader, header)
-}
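The deleted package existed solely to read and write the legacy X-Cloud-Trace-Context header, whose TRACE_ID/SPAN_ID;o=OPTIONS shape can be read off SpanContextFromRequest and SpanContextToRequest above. For reference, here is a minimal sketch of that wire format with no OpenCensus dependency; formatCloudTrace and parseCloudTrace are illustrative helpers, not replacements shipped by this change, and the options handling is simplified.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatCloudTrace builds the legacy X-Cloud-Trace-Context header value.
func formatCloudTrace(traceIDHex string, spanID uint64, sampled bool) string {
	opts := 0
	if sampled {
		opts = 1
	}
	return fmt.Sprintf("%s/%d;o=%d", traceIDHex, spanID, opts)
}

// parseCloudTrace splits the header back into its three fields. Unlike the
// deleted parser it treats anything other than "o=1" as unsampled.
func parseCloudTrace(h string) (traceID string, spanID uint64, sampled bool, err error) {
	slash := strings.Index(h, "/")
	if slash == -1 {
		return "", 0, false, fmt.Errorf("missing span id in %q", h)
	}
	traceID, rest := h[:slash], h[slash+1:]
	spanStr, opts, hasOpts := strings.Cut(rest, ";")
	if spanID, err = strconv.ParseUint(spanStr, 10, 64); err != nil {
		return "", 0, false, err
	}
	sampled = hasOpts && opts == "o=1"
	return traceID, spanID, sampled, nil
}

func main() {
	h := formatCloudTrace("105445aa7843bc8bf206b12000100000", 1, true)
	fmt.Println(h)
	fmt.Println(parseCloudTrace(h))
}
```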
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index fe19e8f97..4a9fce53c 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -180,6 +180,8 @@ type CommonLanguageSettings struct {
ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
// The destination where API teams want this client library to be published.
Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
+ // Configuration for which RPCs should be generated in the GAPIC client.
+ SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
}
func (x *CommonLanguageSettings) Reset() {
@@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination {
return nil
}
+func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
+ if x != nil {
+ return x.SelectiveGapicGeneration
+ }
+ return nil
+}
+
// Details about how and where to publish client libraries.
type ClientLibrarySettings struct {
state protoimpl.MessageState
@@ -719,6 +728,8 @@ type PythonSettings struct {
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Experimental features to be included during client library generation.
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
}
func (x *PythonSettings) Reset() {
@@ -760,6 +771,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
return nil
}
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+ if x != nil {
+ return x.ExperimentalFeatures
+ }
+ return nil
+}
+
// Settings for Node client libraries.
type NodeSettings struct {
state protoimpl.MessageState
@@ -975,6 +993,16 @@ type GoSettings struct {
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Map of service names to renamed services. Keys are the package relative
+ // service names and values are the name to be used for the service client
+ // and call options.
+ //
+ // publishing:
+ //
+ // go_settings:
+ // renamed_services:
+ // Publisher: TopicAdmin
+ RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *GoSettings) Reset() {
@@ -1016,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings {
return nil
}
+func (x *GoSettings) GetRenamedServices() map[string]string {
+ if x != nil {
+ return x.RenamedServices
+ }
+ return nil
+}
+
// Describes the generator configuration for a method.
type MethodSettings struct {
state protoimpl.MessageState
@@ -1114,6 +1149,123 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string {
return nil
}
+// This message is used to configure the generation of a subset of the RPCs in
+// a service for client libraries.
+type SelectiveGapicGeneration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An allowlist of the fully qualified names of RPCs that should be included
+ // on public client surfaces.
+ Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
+}
+
+func (x *SelectiveGapicGeneration) Reset() {
+ *x = SelectiveGapicGeneration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SelectiveGapicGeneration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelectiveGapicGeneration) ProtoMessage() {}
+
+func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
+func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SelectiveGapicGeneration) GetMethods() []string {
+ if x != nil {
+ return x.Methods
+ }
+ return nil
+}
+
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables generation of asynchronous REST clients if `rest` transport is
+ // enabled. By default, asynchronous REST clients will not be generated.
+ // This feature will be enabled by default 1 month after launching the
+ // feature in preview packages.
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+ // Enables generation of protobuf code using new types that are more
+ // Pythonic which are included in `protobuf>=5.29.x`. This feature will be
+ // enabled by default 1 month after launching the feature in preview
+ // packages.
+ ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+ *x = PythonSettings_ExperimentalFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+ if x != nil {
+ return x.RestAsyncIoEnabled
+ }
+ return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
+ if x != nil {
+ return x.ProtobufPythonicTypesEnabled
+ }
+ return false
+}
+
// Describes settings to use when generating API methods that use the
// long-running operation pattern.
// All default values below are from those used in the client library
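The new SelectiveGapicGeneration and PythonSettings_ExperimentalFeatures messages, together with the renamed_services map added to GoSettings earlier in this file, are consumed through the usual nil-safe generated getters. A small sketch follows, assuming only the google.golang.org/genproto/googleapis/api/annotations package this file belongs to; the field values are made up for illustration (the Publisher/TopicAdmin pair comes from the doc comment above).

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Which RPCs a generated client should expose, via the new message.
	common := &annotations.CommonLanguageSettings{
		SelectiveGapicGeneration: &annotations.SelectiveGapicGeneration{
			Methods: []string{"google.example.v1.ExampleService.GetThing"},
		},
	}
	fmt.Println(common.GetSelectiveGapicGeneration().GetMethods())

	// The new Python experimental-feature toggles.
	py := &annotations.PythonSettings{
		ExperimentalFeatures: &annotations.PythonSettings_ExperimentalFeatures{
			RestAsyncIoEnabled: true,
		},
	}
	fmt.Println(py.GetExperimentalFeatures().GetRestAsyncIoEnabled())

	// The renamed_services map added to GoSettings.
	goSettings := &annotations.GoSettings{
		RenamedServices: map[string]string{"Publisher": "TopicAdmin"},
	}
	fmt.Println(goSettings.GetRenamedServices()["Publisher"])
}
```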
@@ -1142,7 +1294,7 @@ type MethodSettings_LongRunning struct {
func (x *MethodSettings_LongRunning) Reset() {
*x = MethodSettings_LongRunning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1155,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string {
func (*MethodSettings_LongRunning) ProtoMessage() {}
func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1343,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
@@ -1352,240 +1504,275 @@ var file_google_api_client_proto_rawDesc = []byte{
0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05,
- 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65,
- 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a,
- 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e,
- 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e,
- 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a,
- 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61,
- 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70,
- 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70,
- 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
- 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74,
- 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
- 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74,
- 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72,
- 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01,
+ 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
+ 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
+ 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+ 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+ 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
+ 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
+ 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
+ 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
+ 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
+ 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
+ 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75,
- 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f,
- 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69,
- 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69,
- 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64,
- 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69,
- 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f,
- 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21,
- 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65,
- 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74,
- 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a,
- 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72,
- 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61,
- 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72,
- 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f,
- 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
- 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
- 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72,
- 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
- 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73,
- 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65,
- 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a,
- 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c,
- 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
- 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a,
- 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
+ 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
+ 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
+ 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
+ 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
+ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
+ 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
+ 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
+ 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
+ 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
+ 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
+ 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
+ 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
+ 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+ 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
+ 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73,
- 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65,
+ 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
+ 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
+ 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+ 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
+ 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+ 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
+ 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
+ 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
+ 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64,
+ 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
+ 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
+ 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e,
+ 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12,
+ 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16,
+ 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41,
+ 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+ 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74,
+ 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a,
+ 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65,
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a,
- 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
- 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
- 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
- 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
- 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
- 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
- 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
- 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
- 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
- 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
- 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
- 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
- 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
- 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
- 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2,
- 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a,
- 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
- 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e,
- 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f,
- 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70,
- 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a,
- 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12,
- 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
- 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c,
- 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65,
- 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d,
- 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
- 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a,
+ 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+ 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67,
+ 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68,
+ 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52,
+ 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e,
+ 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75,
+ 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65,
+ 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67,
+ 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79,
+ 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70,
+ 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c,
+ 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c,
+ 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70,
+ 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61,
- 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f,
- 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69,
- 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52,
- 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09,
- 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53,
- 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f,
- 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12,
- 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a,
- 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41,
- 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
- 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a,
- 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52,
- 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43,
- 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48,
- 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f,
- 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75,
- 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69,
- 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34,
+ 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c,
+ 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42,
+ 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44,
+ 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12,
+ 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04,
+ 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07,
+ 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52,
+ 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
+ 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13,
+ 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45,
+ 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a,
+ 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12,
+ 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61,
+ 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42,
+ 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+ 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
@@ -1601,69 +1788,75 @@ func file_google_api_client_proto_rawDescGZIP() []byte {
}
var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_google_api_client_proto_goTypes = []interface{}{
- (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
- (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
- (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
- (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
- (*Publishing)(nil), // 4: google.api.Publishing
- (*JavaSettings)(nil), // 5: google.api.JavaSettings
- (*CppSettings)(nil), // 6: google.api.CppSettings
- (*PhpSettings)(nil), // 7: google.api.PhpSettings
- (*PythonSettings)(nil), // 8: google.api.PythonSettings
- (*NodeSettings)(nil), // 9: google.api.NodeSettings
- (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
- (*RubySettings)(nil), // 11: google.api.RubySettings
- (*GoSettings)(nil), // 12: google.api.GoSettings
- (*MethodSettings)(nil), // 13: google.api.MethodSettings
- nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
- nil, // 15: google.api.DotnetSettings.RenamedServicesEntry
- nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry
- (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning
- (api.LaunchStage)(0), // 18: google.api.LaunchStage
- (*durationpb.Duration)(nil), // 19: google.protobuf.Duration
- (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions
- (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
+ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
+ (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
+ (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
+ (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
+ (*Publishing)(nil), // 4: google.api.Publishing
+ (*JavaSettings)(nil), // 5: google.api.JavaSettings
+ (*CppSettings)(nil), // 6: google.api.CppSettings
+ (*PhpSettings)(nil), // 7: google.api.PhpSettings
+ (*PythonSettings)(nil), // 8: google.api.PythonSettings
+ (*NodeSettings)(nil), // 9: google.api.NodeSettings
+ (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
+ (*RubySettings)(nil), // 11: google.api.RubySettings
+ (*GoSettings)(nil), // 12: google.api.GoSettings
+ (*MethodSettings)(nil), // 13: google.api.MethodSettings
+ (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration
+ nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry
+ (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
+ nil, // 17: google.api.DotnetSettings.RenamedServicesEntry
+ nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry
+ nil, // 19: google.api.GoSettings.RenamedServicesEntry
+ (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning
+ (api.LaunchStage)(0), // 21: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 22: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
}
var file_google_api_client_proto_depIdxs = []int32{
1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
- 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
- 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
- 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
- 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
- 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
- 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
- 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
- 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
- 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
- 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
- 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
- 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
- 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
- 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
- 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
- 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
- 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
- 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
- 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
- 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
- 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
- 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
- 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
- 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
- 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions
- 32, // [32:32] is the sub-list for method output_type
- 32, // [32:32] is the sub-list for method input_type
- 32, // [32:32] is the sub-list for extension type_name
- 28, // [28:32] is the sub-list for extension extendee
- 0, // [0:28] is the sub-list for field type_name
+ 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
+ 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+ 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
+ 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
+ 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
+ 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
+ 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
+ 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
+ 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
+ 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
+ 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
+ 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
+ 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
+ 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
+ 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
+ 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+ 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+ 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+ 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+ 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+ 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
+ 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+ 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+ 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+ 35, // [35:35] is the sub-list for method output_type
+ 35, // [35:35] is the sub-list for method input_type
+ 35, // [35:35] is the sub-list for extension type_name
+ 31, // [31:35] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
}
func init() { file_google_api_client_proto_init() }
@@ -1816,7 +2009,31 @@ func file_google_api_client_proto_init() {
return nil
}
}
- file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SelectiveGapicGeneration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MethodSettings_LongRunning); i {
case 0:
return &v.state
@@ -1835,7 +2052,7 @@ func file_google_api_client_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_client_proto_rawDesc,
NumEnums: 2,
- NumMessages: 16,
+ NumMessages: 19,
NumExtensions: 4,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
index d4b89c98d..7f6e006cd 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
@@ -172,6 +172,63 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1}
}
+// The resource hierarchy level of the timeseries data of a metric.
+type MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_MetricDescriptorMetadata_TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 0
+ // Scopes a metric to a project.
+ MetricDescriptor_MetricDescriptorMetadata_PROJECT MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 1
+ // Scopes a metric to an organization.
+ MetricDescriptor_MetricDescriptorMetadata_ORGANIZATION MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 2
+ // Scopes a metric to a folder.
+ MetricDescriptor_MetricDescriptorMetadata_FOLDER MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel = 3
+)
+
+// Enum value maps for MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel.
+var (
+ MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel_name = map[int32]string{
+ 0: "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED",
+ 1: "PROJECT",
+ 2: "ORGANIZATION",
+ 3: "FOLDER",
+ }
+ MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel_value = map[string]int32{
+ "TIME_SERIES_RESOURCE_HIERARCHY_LEVEL_UNSPECIFIED": 0,
+ "PROJECT": 1,
+ "ORGANIZATION": 2,
+ "FOLDER": 3,
+ }
+)
+
+func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Enum() *MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel {
+ p := new(MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel)
+ *p = x
+ return p
+}
+
+func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_metric_proto_enumTypes[2].Descriptor()
+}
+
+func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Type() protoreflect.EnumType {
+ return &file_google_api_metric_proto_enumTypes[2]
+}
+
+func (x MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel.Descriptor instead.
+func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
// Defines a metric type and its schema. Once a metric descriptor is created,
// deleting or altering it stops data collection and makes the metric type's
// existing data unusable.
@@ -519,6 +576,8 @@ type MetricDescriptor_MetricDescriptorMetadata struct {
// age are guaranteed to be ingested and available to be read, excluding
// data loss due to errors.
IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"`
+ // The scope of the timeseries data of the metric.
+ TimeSeriesResourceHierarchyLevel []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel `protobuf:"varint,4,rep,packed,name=time_series_resource_hierarchy_level,json=timeSeriesResourceHierarchyLevel,proto3,enum=google.api.MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel" json:"time_series_resource_hierarchy_level,omitempty"`
}
func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() {
@@ -575,6 +634,13 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb
return nil
}
+func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarchyLevel() []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel {
+ if x != nil {
+ return x.TimeSeriesResourceHierarchyLevel
+ }
+ return nil
+}
+
var File_google_api_metric_proto protoreflect.FileDescriptor
var file_google_api_metric_proto_rawDesc = []byte{
@@ -585,7 +651,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68,
0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x09, 0x0a,
0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20,
@@ -620,7 +686,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79,
0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
- 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x73, 0x1a, 0x87, 0x04, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e,
0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
@@ -633,35 +699,54 @@ var file_google_api_metric_proto_rawDesc = []byte{
0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a,
- 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45,
- 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45,
- 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a,
- 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a,
- 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41,
- 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
- 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01,
- 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44,
- 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
- 0x47, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54,
- 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06,
- 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
- 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
- 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
- 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47,
- 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0xa6, 0x01, 0x0a,
+ 0x24, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x56, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x52, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a, 0x20, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x69, 0x65, 0x72,
+ 0x61, 0x72, 0x63, 0x68, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x30, 0x54, 0x49,
+ 0x4d, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x5f, 0x48, 0x49, 0x45, 0x52, 0x41, 0x52, 0x43, 0x48, 0x59, 0x5f, 0x4c, 0x45, 0x56,
+ 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a,
+ 0x0c, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12,
+ 0x0a, 0x0a, 0x06, 0x46, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x03, 0x22, 0x4f, 0x0a, 0x0a, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, 0x54,
+ 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
+ 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a,
+ 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, 0x09,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, 0x4c,
+ 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12,
+ 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f,
+ 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+ 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x49,
+ 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, 0x22,
+ 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x36,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06,
+ 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, 0x41,
+ 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -676,34 +761,36 @@ func file_google_api_metric_proto_rawDescGZIP() []byte {
return file_google_api_metric_proto_rawDescData
}
-var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_google_api_metric_proto_goTypes = []interface{}{
- (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind
- (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType
- (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor
- (*Metric)(nil), // 3: google.api.Metric
- (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata
- nil, // 5: google.api.Metric.LabelsEntry
- (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor
- (api.LaunchStage)(0), // 7: google.api.LaunchStage
- (*durationpb.Duration)(nil), // 8: google.protobuf.Duration
+ (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind
+ (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType
+ (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel)(0), // 2: google.api.MetricDescriptor.MetricDescriptorMetadata.TimeSeriesResourceHierarchyLevel
+ (*MetricDescriptor)(nil), // 3: google.api.MetricDescriptor
+ (*Metric)(nil), // 4: google.api.Metric
+ (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 5: google.api.MetricDescriptor.MetricDescriptorMetadata
+ nil, // 6: google.api.Metric.LabelsEntry
+ (*label.LabelDescriptor)(nil), // 7: google.api.LabelDescriptor
+ (api.LaunchStage)(0), // 8: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 9: google.protobuf.Duration
}
var file_google_api_metric_proto_depIdxs = []int32{
- 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor
- 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
- 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
- 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata
- 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage
- 5, // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry
- 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage
- 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration
- 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration
- 9, // [9:9] is the sub-list for method output_type
- 9, // [9:9] is the sub-list for method input_type
- 9, // [9:9] is the sub-list for extension type_name
- 9, // [9:9] is the sub-list for extension extendee
- 0, // [0:9] is the sub-list for field type_name
+ 7, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor
+ 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
+ 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
+ 5, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata
+ 8, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage
+ 6, // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry
+ 8, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage
+ 9, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration
+ 9, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration
+ 2, // 9: google.api.MetricDescriptor.MetricDescriptorMetadata.time_series_resource_hierarchy_level:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata.TimeSeriesResourceHierarchyLevel
+ 10, // [10:10] is the sub-list for method output_type
+ 10, // [10:10] is the sub-list for method input_type
+ 10, // [10:10] is the sub-list for extension type_name
+ 10, // [10:10] is the sub-list for extension extendee
+ 0, // [0:10] is the sub-list for field type_name
}
func init() { file_google_api_metric_proto_init() }
@@ -754,7 +841,7 @@ func file_google_api_metric_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_metric_proto_rawDesc,
- NumEnums: 2,
+ NumEnums: 3,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
index bd46edbe7..85a9387f7 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
index 3e5621827..bed9216d8 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -80,11 +80,12 @@ type ErrorInfo struct {
Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
// Additional structured details about this error.
//
- // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+ // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+ // ideally be lowerCamelCase. Also, they must be limited to 64 characters in
// length. When identifying the current value of an exceeded limit, the units
// should be contained in the key, not the value. For example, rather than
- // {"instanceLimit": "100/request"}, should be returned as,
- // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of
+ // `{"instanceLimit": "100/request"}`, should be returned as,
+ // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
// instances that can be created in a single (batch) request.
Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
@@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct {
Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
// A description of why the request element is bad.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The reason of the field-level error. This is a constant value that
+ // identifies the proximate cause of the field-level error. It should
+ // uniquely identify the type of the FieldViolation within the scope of the
+ // google.rpc.ErrorInfo.domain. This should be at most 63
+ // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`,
+ // which represents UPPER_SNAKE_CASE.
+ Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"`
+ // Provides a localized error message for field-level errors that is safe to
+ // return to the API consumer.
+ LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"`
}
func (x *BadRequest_FieldViolation) Reset() {
@@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string {
return ""
}
+func (x *BadRequest_FieldViolation) GetReason() string {
+ if x != nil {
+ return x.Reason
+ }
+ return ""
+}
+
+func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage {
+ if x != nil {
+ return x.LocalizedMessage
+ }
+ return nil
+}
+
// Describes a URL link.
type Help_Link struct {
state protoimpl.MessageState
@@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{
0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61,
0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
- 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
- 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
- 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
- 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
- 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
- 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
- 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
- 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
- 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
- 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
- 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a,
+ 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65,
+ 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
+ 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04,
+ 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
+ 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75,
+ 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a,
+ 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65,
+ 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50,
+ 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{
12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
- 6, // [6:6] is the sub-list for method output_type
- 6, // [6:6] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
+ 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
}
func init() { file_google_rpc_error_details_proto_init() }
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 6ad1b1c1d..06a3f7106 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 0854d298e..d9bfa6e1e 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
@@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
is a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.
-- If you are adding a new file, make sure it has the copyright message template
- at the top as a comment. You can copy over the message from an existing file
+- If you are adding a new file, make sure it has the copyright message template
+ at the top as a comment. You can copy over the message from an existing file
and update the year.
- The grpc package should only depend on standard Go packages and a small number
@@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a github issue if it exists.
+ and **why** it was made. Link to a GitHub issue if it exists.
-- If you want to fix formatting or style, consider whether your changes are an
- obvious improvement or might be considered a personal preference. If a style
- change is based on preference, it likely will not be accepted. If it corrects
- widely agreed-upon anti-patterns, then please do create a PR and explain the
+- If you want to fix formatting or style, consider whether your changes are an
+ obvious improvement or might be considered a personal preference. If a style
+ change is based on preference, it likely will not be accepted. If it corrects
+ widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.
- Unless your PR is trivial, you should expect there will be reviewer comments
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 6a8a07781..5d4096d46 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
-- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [canguler](https://github.com/canguler), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index ab0fbb79b..b572707c6 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
## Prerequisites
-- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
## Installation
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
index be6e10870..abab27937 100644
--- a/vendor/google.golang.org/grpc/SECURITY.md
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy
-For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b50..d7b40b7cb 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f7..c9b343c71 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
@@ -74,6 +75,8 @@ func unregisterForTesting(name string) {
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
@@ -92,54 +95,6 @@ func Get(name string) Builder {
return nil
}
-// A SubConn represents a single connection to a gRPC backend service.
-//
-// Each SubConn contains a list of addresses.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger the
-// connecting, Balancers must call Connect. If a connection re-enters IDLE,
-// Balancers must call Connect again to trigger a new connection attempt.
-//
-// gRPC will try to connect to the addresses in sequence, and stop trying the
-// remainder once the first connection is successful. If an attempt to connect
-// to all addresses encounters an error, the SubConn will enter
-// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
-//
-// Once established, if a connection is lost, the SubConn will transition
-// directly to IDLE.
-//
-// This interface is to be implemented by gRPC. Users should not need their own
-// implementation of this interface. For situations like testing, any
-// implementations should embed this interface. This allows gRPC to add new
-// methods to this interface.
-type SubConn interface {
- // UpdateAddresses updates the addresses used in this SubConn.
- // gRPC checks if currently-connected address is still in the new list.
- // If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully closed, and
- // a new connection will be created.
- //
- // This will trigger a state transition for the SubConn.
- //
- // Deprecated: this method will be removed. Create new SubConns for new
- // addresses instead.
- UpdateAddresses([]resolver.Address)
- // Connect starts the connecting for this SubConn.
- Connect()
- // GetOrBuildProducer returns a reference to the existing Producer for this
- // ProducerBuilder in this SubConn, or, if one does not currently exist,
- // creates a new one and returns it. Returns a close function which must
- // be called when the Producer is no longer needed.
- GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
- // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
- // allowed to complete. No future calls should be made on the SubConn.
- // One final state update will be delivered to the StateListener (or
- // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
- // indicate the shutdown operation. This may be delivered before
- // in-progress RPCs are complete and the actual connection is closed.
- Shutdown()
-}
-
// NewSubConnOptions contains options to create new SubConn.
type NewSubConnOptions struct {
// CredsBundle is the credentials bundle that will be used in the created
@@ -174,6 +129,13 @@ type State struct {
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a
+// ClientConn passed to Builder.Build() by embedding it in their
+// implementations. An embedded ClientConn must never be nil, or runtime panics
+// will occur.
type ClientConn interface {
// NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established.
@@ -212,6 +174,17 @@ type ClientConn interface {
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string
+
+ // MetricsRecorder provides the metrics recorder that balancers can use to
+ // record metrics. Balancer implementations that do not register metrics on
+ // the metrics registry and record on them can ignore this method. The returned
+ // MetricsRecorder is guaranteed to never be nil.
+ MetricsRecorder() estats.MetricsRecorder
+
+ // EnforceClientConnEmbedding is included to force implementers to embed
+ // another implementation of this interface, allowing gRPC to add methods
+ // without breaking users.
+ internal.EnforceClientConnEmbedding
}
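A minimal sketch of what the embedding requirement above implies for an intercepting balancer: the wrapper embeds the balancer.ClientConn it received from Builder.Build, so every method it does not override, including the new MetricsRecorder, is delegated to gRPC's implementation. The package and type names here are illustrative assumptions, not part of the vendored code.

package example // hypothetical package for this sketch

import "google.golang.org/grpc/balancer"

// ccWrapper intercepts UpdateState and delegates everything else, including
// MetricsRecorder() and the embedding enforcement, to the wrapped ClientConn.
type ccWrapper struct {
	balancer.ClientConn // must never be nil, per the NOTICE above
}

func (c *ccWrapper) UpdateState(s balancer.State) {
	// A real policy could record metrics here via c.MetricsRecorder(),
	// which the embedded ClientConn guarantees to be non-nil.
	c.ClientConn.UpdateState(s)
}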
// BuildOptions contains additional information for Build.
@@ -403,15 +376,6 @@ type ExitIdler interface {
ExitIdle()
}
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
- // ConnectivityState is the connectivity state of the SubConn.
- ConnectivityState connectivity.State
- // ConnectionError is set if the ConnectivityState is TransientFailure,
- // describing the reason the SubConn failed. Otherwise, it is nil.
- ConnectionError error
-}
-
// ClientConnState describes the state of a ClientConn relevant to the
// balancer.
type ClientConnState struct {
@@ -424,20 +388,3 @@ type ClientConnState struct {
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
-
-// A ProducerBuilder is a simple constructor for a Producer. It is used by the
-// SubConn to create producers when needed.
-type ProducerBuilder interface {
- // Build creates a Producer. The first parameter is always a
- // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
- // associated SubConn), but is declared as `any` to avoid a dependency
- // cycle. Should also return a close function that will be called when all
- // references to the Producer have been given up.
- Build(grpcClientConnInterface any) (p Producer, close func())
-}
-
-// A Producer is a type shared among potentially many consumers. It is
-// associated with a SubConn, and an implementation will typically contain
-// other methods to provide additional functionality, e.g. configuration or
-// subscription registration.
-type Producer any
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a7f1eeec8..d5ed172ae 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
// If resolver state contains no addresses, return an error so ClientConn
- // will trigger re-resolve. Also records this as an resolver error, so when
+ // will trigger re-resolve. Also records this as a resolver error, so when
// the overall state turns transient failure, the error message will have
// the zero address information.
if len(s.ResolverState.Addresses) == 0 {
@@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
new file mode 100644
index 000000000..421c4fecc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -0,0 +1,358 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package endpointsharding implements a load balancing policy that manages
+// homogeneous child policies each owning a single endpoint.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package endpointsharding
+
+import (
+ "errors"
+ rand "math/rand/v2"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+)
+
+// ChildState is the balancer state of a child along with the endpoint which
+// identifies the child balancer.
+type ChildState struct {
+ Endpoint resolver.Endpoint
+ State balancer.State
+
+ // Balancer exposes only the ExitIdler interface of the child LB policy.
+ // Other methods of the child policy are called only by endpointsharding.
+ Balancer balancer.ExitIdler
+}
+
+// Options are the options to configure the behaviour of the
+// endpointsharding balancer.
+type Options struct {
+ // DisableAutoReconnect allows the balancer to keep child balancers in the
+ // IDLE state until they are explicitly triggered to exit using the
+ // ChildState obtained from the endpointsharding picker. When set to false,
+ // the endpointsharding balancer will automatically call ExitIdle on child
+ // connections that report IDLE.
+ DisableAutoReconnect bool
+}
+
+// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
+// type as the balancer.Builder.Build method.
+type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
+
+// NewBalancer returns a load balancing policy that manages homogeneous child
+// policies each owning a single endpoint. The endpointsharding balancer
+// forwards the LoadBalancingConfig in ClientConn state updates to its children.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
+ es := &endpointSharding{
+ cc: cc,
+ bOpts: opts,
+ esOpts: esOpts,
+ childBuilder: childBuilder,
+ }
+ es.children.Store(resolver.NewEndpointMap())
+ return es
+}
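An illustrative sketch, under the assumption that a registered policy such as pick_first is used as the child, of how a parent LB policy might construct this balancer; the buildSharded helper is hypothetical, not part of the vendored code.

package example // hypothetical package for this sketch

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
)

// buildSharded gives every resolved endpoint its own pick_first child.
func buildSharded(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	child := balancer.Get("pick_first") // assumed to be registered
	return endpointsharding.NewBalancer(cc, opts, child.Build, endpointsharding.Options{
		// Children reporting IDLE are reconnected automatically; set
		// DisableAutoReconnect to drive ExitIdle from the picker instead.
		DisableAutoReconnect: false,
	})
}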
+
+// endpointSharding is a balancer that wraps child balancers. It creates a child
+// balancer with child config for every unique Endpoint received. It updates the
+// child states on any update from parent or child.
+type endpointSharding struct {
+ cc balancer.ClientConn
+ bOpts balancer.BuildOptions
+ esOpts Options
+ childBuilder ChildBuilderFunc
+
+ // childMu synchronizes calls to any single child. It must be held for all
+ // calls into a child. To avoid deadlocks, do not acquire childMu while
+ // holding mu.
+ childMu sync.Mutex
+ children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper
+
+ // inhibitChildUpdates is set during UpdateClientConnState/ResolverError
+ // calls (calls to children will each produce an update, only want one
+ // update).
+ inhibitChildUpdates atomic.Bool
+
+ // mu synchronizes access to the state stored in balancerWrappers in the
+ // children field. mu must not be held during calls into a child since
+ // synchronous calls back from the child may require taking mu, causing a
+ // deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
+ mu sync.Mutex
+}
+
+// UpdateClientConnState creates a child for new endpoints and deletes children
+// for endpoints that are no longer present. It also updates all the children,
+// and sends a single synchronous update of the children's aggregated state at
+// the end of the UpdateClientConnState operation. If any endpoint has no
+// addresses it will ignore that endpoint. Otherwise, returns first error found
+// from a child, but fully processes the new update.
+func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ var ret error
+
+ children := es.children.Load()
+ newChildren := resolver.NewEndpointMap()
+
+ // Update/Create new children.
+ for _, endpoint := range state.ResolverState.Endpoints {
+ if _, ok := newChildren.Get(endpoint); ok {
+ // Endpoint child was already created, continue to avoid duplicate
+ // update.
+ continue
+ }
+ var childBalancer *balancerWrapper
+ if val, ok := children.Get(endpoint); ok {
+ childBalancer = val.(*balancerWrapper)
+ // Endpoint attributes may have changed, update the stored endpoint.
+ es.mu.Lock()
+ childBalancer.childState.Endpoint = endpoint
+ es.mu.Unlock()
+ } else {
+ childBalancer = &balancerWrapper{
+ childState: ChildState{Endpoint: endpoint},
+ ClientConn: es.cc,
+ es: es,
+ }
+ childBalancer.childState.Balancer = childBalancer
+ childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
+ }
+ newChildren.Set(endpoint, childBalancer)
+ if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
+ BalancerConfig: state.BalancerConfig,
+ ResolverState: resolver.State{
+ Endpoints: []resolver.Endpoint{endpoint},
+ Attributes: state.ResolverState.Attributes,
+ },
+ }); err != nil && ret == nil {
+ // Return first error found, and always commit full processing of
+ // updating children. If desired to process more specific errors
+ // across all endpoints, the caller should make these specific
+ // validations; this is a current limitation for simplicity's sake.
+ ret = err
+ }
+ }
+ // Delete old children that are no longer present.
+ for _, e := range children.Keys() {
+ child, _ := children.Get(e)
+ if _, ok := newChildren.Get(e); !ok {
+ child.(*balancerWrapper).closeLocked()
+ }
+ }
+ es.children.Store(newChildren)
+ if newChildren.Len() == 0 {
+ return balancer.ErrBadResolverState
+ }
+ return ret
+}
+
+// ResolverError forwards the resolver error to all of the endpointSharding's
+// children and sends a single synchronous update of the childStates at the end
+// of the ResolverError operation.
+func (es *endpointSharding) ResolverError(err error) {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.(*balancerWrapper).resolverErrorLocked(err)
+ }
+}
+
+func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
+ // UpdateSubConnState is deprecated.
+}
+
+func (es *endpointSharding) Close() {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.(*balancerWrapper).closeLocked()
+ }
+}
+
+// updateState updates this component's state. It sends the aggregated state,
+// and a picker with round robin behavior with all the child states present if
+// needed.
+func (es *endpointSharding) updateState() {
+ if es.inhibitChildUpdates.Load() {
+ return
+ }
+ var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
+
+ es.mu.Lock()
+ defer es.mu.Unlock()
+
+ children := es.children.Load()
+ childStates := make([]ChildState, 0, children.Len())
+
+ for _, child := range children.Values() {
+ bw := child.(*balancerWrapper)
+ childState := bw.childState
+ childStates = append(childStates, childState)
+ childPicker := childState.State.Picker
+ switch childState.State.ConnectivityState {
+ case connectivity.Ready:
+ readyPickers = append(readyPickers, childPicker)
+ case connectivity.Connecting:
+ connectingPickers = append(connectingPickers, childPicker)
+ case connectivity.Idle:
+ idlePickers = append(idlePickers, childPicker)
+ case connectivity.TransientFailure:
+ transientFailurePickers = append(transientFailurePickers, childPicker)
+ // connectivity.Shutdown shouldn't appear.
+ }
+ }
+
+ // Construct the round robin picker based off the aggregated state. Whatever
+ // the aggregated state, use the pickers present that are currently in that
+ // state only.
+ var aggState connectivity.State
+ var pickers []balancer.Picker
+ if len(readyPickers) >= 1 {
+ aggState = connectivity.Ready
+ pickers = readyPickers
+ } else if len(connectingPickers) >= 1 {
+ aggState = connectivity.Connecting
+ pickers = connectingPickers
+ } else if len(idlePickers) >= 1 {
+ aggState = connectivity.Idle
+ pickers = idlePickers
+ } else if len(transientFailurePickers) >= 1 {
+ aggState = connectivity.TransientFailure
+ pickers = transientFailurePickers
+ } else {
+ aggState = connectivity.TransientFailure
+ pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
+ } // No children (resolver error before valid update).
+ p := &pickerWithChildStates{
+ pickers: pickers,
+ childStates: childStates,
+ next: uint32(rand.IntN(len(pickers))),
+ }
+ es.cc.UpdateState(balancer.State{
+ ConnectivityState: aggState,
+ Picker: p,
+ })
+}
+
+// pickerWithChildStates delegates to the pickers it holds in a round robin
+// fashion. It also contains the childStates of all the endpointSharding's
+// children.
+type pickerWithChildStates struct {
+ pickers []balancer.Picker
+ childStates []ChildState
+ next uint32
+}
+
+func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ nextIndex := atomic.AddUint32(&p.next, 1)
+ picker := p.pickers[nextIndex%uint32(len(p.pickers))]
+ return picker.Pick(info)
+}
+
+// ChildStatesFromPicker returns the state of all the children managed by the
+// endpoint sharding balancer that created this picker.
+func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
+ p, ok := picker.(*pickerWithChildStates)
+ if !ok {
+ return nil
+ }
+ return p.childStates
+}
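An illustrative sketch of consuming ChildStatesFromPicker from a policy layered above endpointsharding; the parentCC wrapper is a hypothetical ClientConn interceptor, not part of the vendored code.

package example // hypothetical package for this sketch

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/connectivity"
)

// parentCC intercepts picker updates pushed by the endpointsharding child.
type parentCC struct {
	balancer.ClientConn
}

func (p *parentCC) UpdateState(state balancer.State) {
	for _, cs := range endpointsharding.ChildStatesFromPicker(state.Picker) {
		if cs.State.ConnectivityState == connectivity.Idle {
			cs.Balancer.ExitIdle() // only needed with DisableAutoReconnect set
		}
	}
	p.ClientConn.UpdateState(state) // forward the aggregated state upward
}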
+
+// balancerWrapper is a wrapper of a balancer. It identifies a child balancer by
+// endpoint, and persists recent child balancer state.
+type balancerWrapper struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+
+ // child contains the wrapped balancer. Access its methods only through
+ // methods on balancerWrapper to ensure proper synchronization
+ child balancer.Balancer
+ balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
+
+ es *endpointSharding
+
+ // Access to the following fields is guarded by es.mu.
+
+ childState ChildState
+ isClosed bool
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+ bw.es.mu.Lock()
+ bw.childState.State = state
+ bw.es.mu.Unlock()
+ if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
+ bw.ExitIdle()
+ }
+ bw.es.updateState()
+}
+
+// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
+// avoid deadlocks due to synchronous balancer state updates.
+func (bw *balancerWrapper) ExitIdle() {
+ if ei, ok := bw.child.(balancer.ExitIdler); ok {
+ go func() {
+ bw.es.childMu.Lock()
+ if !bw.isClosed {
+ ei.ExitIdle()
+ }
+ bw.es.childMu.Unlock()
+ }()
+ }
+}
+
+// updateClientConnStateLocked delivers the ClientConnState to the child
+// balancer. Callers must hold the child mutex of the parent endpointsharding
+// balancer.
+func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
+ return bw.child.UpdateClientConnState(ccs)
+}
+
+// closeLocked closes the child balancer. Callers must hold the child mutex of
+// the parent endpointsharding balancer.
+func (bw *balancerWrapper) closeLocked() {
+ bw.child.Close()
+ bw.isClosed = true
+}
+
+func (bw *balancerWrapper) resolverErrorLocked(err error) {
+ bw.child.ResolverError(err)
+}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
index bdf93dbfe..eecfa1257 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go
@@ -19,8 +19,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.2
+// protoc-gen-go v1.36.4
+// protoc v5.27.1
// source: grpc/lb/v1/load_balancer.proto
package grpc_lb_v1
@@ -32,6 +32,7 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -42,24 +43,21 @@ const (
)
type LoadBalanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to LoadBalanceRequestType:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to LoadBalanceRequestType:
//
// *LoadBalanceRequest_InitialRequest
// *LoadBalanceRequest_ClientStats
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *LoadBalanceRequest) Reset() {
*x = LoadBalanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LoadBalanceRequest) String() string {
@@ -70,7 +68,7 @@ func (*LoadBalanceRequest) ProtoMessage() {}
func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -85,23 +83,27 @@ func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0}
}
-func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
- if m != nil {
- return m.LoadBalanceRequestType
+func (x *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
+ if x != nil {
+ return x.LoadBalanceRequestType
}
return nil
}
func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
- if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
- return x.InitialRequest
+ if x != nil {
+ if x, ok := x.LoadBalanceRequestType.(*LoadBalanceRequest_InitialRequest); ok {
+ return x.InitialRequest
+ }
}
return nil
}
func (x *LoadBalanceRequest) GetClientStats() *ClientStats {
- if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
- return x.ClientStats
+ if x != nil {
+ if x, ok := x.LoadBalanceRequestType.(*LoadBalanceRequest_ClientStats); ok {
+ return x.ClientStats
+ }
}
return nil
}
@@ -126,24 +128,21 @@ func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceReques
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
type InitialLoadBalanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The name of the load balanced service (e.g., service.googleapis.com). Its
// length should be less than 256 bytes.
// The name might include a port number. How to handle the port number is up
// to the balancer.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *InitialLoadBalanceRequest) Reset() {
*x = InitialLoadBalanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *InitialLoadBalanceRequest) String() string {
@@ -154,7 +153,7 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {}
func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -178,23 +177,20 @@ func (x *InitialLoadBalanceRequest) GetName() string {
// Contains the number of calls finished for a particular load balance token.
type ClientStatsPerToken struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// See Server.load_balance_token.
LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"`
// The total number of RPCs that finished associated with the token.
- NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"`
+ NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ClientStatsPerToken) Reset() {
*x = ClientStatsPerToken{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ClientStatsPerToken) String() string {
@@ -205,7 +201,7 @@ func (*ClientStatsPerToken) ProtoMessage() {}
func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -237,10 +233,7 @@ func (x *ClientStatsPerToken) GetNumCalls() int64 {
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The timestamp of generating the report.
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// The total number of RPCs that started.
@@ -254,15 +247,15 @@ type ClientStats struct {
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"`
// The list of dropped calls.
CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ClientStats) Reset() {
*x = ClientStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ClientStats) String() string {
@@ -273,7 +266,7 @@ func (*ClientStats) ProtoMessage() {}
func (x *ClientStats) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -331,25 +324,22 @@ func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken {
}
type LoadBalanceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to LoadBalanceResponseType:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to LoadBalanceResponseType:
//
// *LoadBalanceResponse_InitialResponse
// *LoadBalanceResponse_ServerList
// *LoadBalanceResponse_FallbackResponse
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *LoadBalanceResponse) Reset() {
*x = LoadBalanceResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LoadBalanceResponse) String() string {
@@ -360,7 +350,7 @@ func (*LoadBalanceResponse) ProtoMessage() {}
func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -375,30 +365,36 @@ func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4}
}
-func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
- if m != nil {
- return m.LoadBalanceResponseType
+func (x *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
+ if x != nil {
+ return x.LoadBalanceResponseType
}
return nil
}
func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
- if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
- return x.InitialResponse
+ if x != nil {
+ if x, ok := x.LoadBalanceResponseType.(*LoadBalanceResponse_InitialResponse); ok {
+ return x.InitialResponse
+ }
}
return nil
}
func (x *LoadBalanceResponse) GetServerList() *ServerList {
- if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
- return x.ServerList
+ if x != nil {
+ if x, ok := x.LoadBalanceResponseType.(*LoadBalanceResponse_ServerList); ok {
+ return x.ServerList
+ }
}
return nil
}
func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse {
- if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok {
- return x.FallbackResponse
+ if x != nil {
+ if x, ok := x.LoadBalanceResponseType.(*LoadBalanceResponse_FallbackResponse); ok {
+ return x.FallbackResponse
+ }
}
return nil
}
@@ -431,18 +427,16 @@ func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponse
func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
type FallbackResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FallbackResponse) Reset() {
*x = FallbackResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FallbackResponse) String() string {
@@ -453,7 +447,7 @@ func (*FallbackResponse) ProtoMessage() {}
func (x *FallbackResponse) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -469,23 +463,20 @@ func (*FallbackResponse) Descriptor() ([]byte, []int) {
}
type InitialLoadBalanceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This interval defines how often the client should send the client stats
// to the load balancer. Stats should only be reported when the duration is
// positive.
ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *InitialLoadBalanceResponse) Reset() {
*x = InitialLoadBalanceResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *InitialLoadBalanceResponse) String() string {
@@ -496,7 +487,7 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {}
func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -519,24 +510,21 @@ func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.
}
type ServerList struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Contains a list of servers selected by the load balancer. The list will
// be updated when server resolutions change or as needed to balance load
// across more servers. The client should consume the server list in order
// unless instructed otherwise via the client_config.
- Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"`
+ Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerList) Reset() {
*x = ServerList{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServerList) String() string {
@@ -547,7 +535,7 @@ func (*ServerList) ProtoMessage() {}
func (x *ServerList) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -572,10 +560,7 @@ func (x *ServerList) GetServers() []*Server {
// Contains server information. When the drop field is not true, use the other
// fields.
type Server struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A resolved address for the server, serialized in network-byte-order. It may
// either be an IPv4 or IPv6 address.
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
@@ -592,16 +577,16 @@ type Server struct {
// Indicates whether this particular request should be dropped by the client.
// If the request is dropped, there will be a corresponding entry in
// ClientStats.calls_finished_with_drop.
- Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"`
+ Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Server) Reset() {
*x = Server{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Server) String() string {
@@ -612,7 +597,7 @@ func (*Server) ProtoMessage() {}
func (x *Server) ProtoReflect() protoreflect.Message {
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -657,7 +642,7 @@ func (x *Server) GetDrop() bool {
var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor
-var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{
+var file_grpc_lb_v1_load_balancer_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61,
0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f,
@@ -765,22 +750,22 @@ var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62,
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once
- file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc
+ file_grpc_lb_v1_load_balancer_proto_rawDescData []byte
)
func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte {
file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() {
- file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData)
+ file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_lb_v1_load_balancer_proto_rawDesc), len(file_grpc_lb_v1_load_balancer_proto_rawDesc)))
})
return file_grpc_lb_v1_load_balancer_proto_rawDescData
}
var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{
+var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{
(*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest
(*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest
(*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken
@@ -817,121 +802,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
if File_grpc_lb_v1_load_balancer_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LoadBalanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitialLoadBalanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ClientStatsPerToken); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ClientStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LoadBalanceResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FallbackResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InitialLoadBalanceResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerList); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Server); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{
(*LoadBalanceRequest_InitialRequest)(nil),
(*LoadBalanceRequest_ClientStats)(nil),
}
- file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{
(*LoadBalanceResponse_InitialResponse)(nil),
(*LoadBalanceResponse_ServerList)(nil),
(*LoadBalanceResponse_FallbackResponse)(nil),
@@ -940,7 +815,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_lb_v1_load_balancer_proto_rawDesc), len(file_grpc_lb_v1_load_balancer_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -951,7 +826,6 @@ func file_grpc_lb_v1_load_balancer_proto_init() {
MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes,
}.Build()
File_grpc_lb_v1_load_balancer_proto = out.File
- file_grpc_lb_v1_load_balancer_proto_rawDesc = nil
file_grpc_lb_v1_load_balancer_proto_goTypes = nil
file_grpc_lb_v1_load_balancer_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
index c57857ac0..84e6a2505 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
@@ -19,8 +19,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/lb/v1/load_balancer.proto
package grpc_lb_v1
@@ -34,8 +34,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad"
@@ -46,7 +46,7 @@ const (
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type LoadBalancerClient interface {
// Bidirectional rpc to get a list of servers.
- BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error)
+ BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error)
}
type loadBalancerClient struct {
@@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient {
return &loadBalancerClient{cc}
}
-func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
+func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &loadBalancerBalanceLoadClient{ClientStream: stream}
+ x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream}
return x, nil
}
-type LoadBalancer_BalanceLoadClient interface {
- Send(*LoadBalanceRequest) error
- Recv() (*LoadBalanceResponse, error)
- grpc.ClientStream
-}
-
-type loadBalancerBalanceLoadClient struct {
- grpc.ClientStream
-}
-
-func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) {
- m := new(LoadBalanceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse]
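A client-side sketch showing that code written against the old LoadBalancer_BalanceLoadClient name still compiles, since it now aliases the generic stream type; the surrounding function and connection setup are assumptions, not part of the vendored code.

package example // hypothetical package for this sketch

import (
	"context"

	"google.golang.org/grpc"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
)

func balanceOnce(ctx context.Context, cc *grpc.ClientConn) error {
	// The alias keeps this declaration valid after the regeneration.
	var stream lbpb.LoadBalancer_BalanceLoadClient
	stream, err := lbpb.NewLoadBalancerClient(cc).BalanceLoad(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(&lbpb.LoadBalanceRequest{}); err != nil {
		return err
	}
	_, err = stream.Recv() // read the balancer's initial response
	return err
}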
// LoadBalancerServer is the server API for LoadBalancer service.
// All implementations should embed UnimplementedLoadBalancerServer
-// for forward compatibility
+// for forward compatibility.
type LoadBalancerServer interface {
// Bidirectional rpc to get a list of servers.
- BalanceLoad(LoadBalancer_BalanceLoadServer) error
+ BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error
}
-// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations.
-type UnimplementedLoadBalancerServer struct {
-}
+// UnimplementedLoadBalancerServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedLoadBalancerServer struct{}
-func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error {
+func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error {
return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented")
}
+func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {}
// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to LoadBalancerServer will
@@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface {
}
func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) {
+ // If the following call panics, it indicates UnimplementedLoadBalancerServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&LoadBalancer_ServiceDesc, srv)
}
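A server-side sketch of the embed-by-value rule that the registration check above enforces; the lbServer type and its trivial handler are hypothetical, not part of the vendored code.

package example // hypothetical package for this sketch

import (
	"google.golang.org/grpc"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
)

// lbServer embeds UnimplementedLoadBalancerServer by value, so the
// testEmbeddedByValue probe in RegisterLoadBalancerServer succeeds.
type lbServer struct {
	lbpb.UnimplementedLoadBalancerServer
}

func (s *lbServer) BalanceLoad(stream grpc.BidiStreamingServer[lbpb.LoadBalanceRequest, lbpb.LoadBalanceResponse]) error {
	if _, err := stream.Recv(); err != nil { // initial request from the client
		return err
	}
	return stream.Send(&lbpb.LoadBalanceResponse{}) // minimal reply
}

func register(s *grpc.Server) {
	lbpb.RegisterLoadBalancerServer(s, &lbServer{})
}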
func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream})
-}
-
-type LoadBalancer_BalanceLoadServer interface {
- Send(*LoadBalanceResponse) error
- Recv() (*LoadBalanceRequest, error)
- grpc.ServerStream
-}
-
-type loadBalancerBalanceLoadServer struct {
- grpc.ServerStream
+ return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream})
}
-func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) {
- m := new(LoadBalanceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]
// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service.
// It's only intended for direct use with grpc.RegisterService,
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
index 47a3e938d..0770b88e9 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go
@@ -197,7 +197,7 @@ type lbBalancer struct {
// manualResolver is used in the remote LB ClientConn inside grpclb. When
// resolved address updates are received by grpclb, filtered updates will be
- // send to remote LB ClientConn through this resolver.
+ // sent to remote LB ClientConn through this resolver.
manualResolver *manual.Resolver
// The ClientConn to talk to the remote balancer.
ccRemoteLB *remoteBalancerCCWrapper
@@ -219,7 +219,7 @@ type lbBalancer struct {
// All backends addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in
// serverlist. When generating picker, a SubConn slice with the same order
- // but with only READY SCs will be gerenated.
+ // but with only READY SCs will be generated.
backendAddrsWithoutMetadata []resolver.Address
// Roundrobin functionalities.
state connectivity.State
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
index 8942c3131..96a57c8c7 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go
@@ -21,14 +21,14 @@ package grpclb
import (
"encoding/json"
- "google.golang.org/grpc"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/serviceconfig"
)
const (
roundRobinName = roundrobin.Name
- pickFirstName = grpc.PickFirstBalancerName
+ pickFirstName = pickfirst.Name
)
type grpclbServiceConfig struct {
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
index 20c5f2ec3..9ff07522d 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go
@@ -19,13 +19,13 @@
package grpclb
import (
+ rand "math/rand/v2"
"sync"
"sync/atomic"
"google.golang.org/grpc/balancer"
lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/status"
)
@@ -112,7 +112,7 @@ type rrPicker struct {
func newRRPicker(readySCs []balancer.SubConn) *rrPicker {
return &rrPicker{
subConns: readySCs,
- subConnsNext: grpcrand.Intn(len(readySCs)),
+ subConnsNext: rand.IntN(len(readySCs)),
}
}
@@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *
return &lbPicker{
serverList: serverList,
subConns: readySCs,
- subConnsNext: grpcrand.Intn(len(readySCs)),
+ subConnsNext: rand.IntN(len(readySCs)),
stats: stats,
}
}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
new file mode 100644
index 000000000..7d66cb491
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import (
+ rand "math/rand/v2"
+ "time"
+)
+
+var (
+ // RandShuffle pseudo-randomizes the order of addresses.
+ RandShuffle = rand.Shuffle
+ // TimeAfterFunc allows mocking the timer for testing connection delay
+ // related functionality.
+ TimeAfterFunc = func(d time.Duration, f func()) func() {
+ timer := time.AfterFunc(d, f)
+ return func() { timer.Stop() }
+ }
+)
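A test-only sketch of stubbing the TimeAfterFunc hook so connection-delay timers fire immediately; because the package is internal, such code would have to live under balancer/pickfirst, and the helper name is an assumption, not part of the vendored code.

package pickfirst_test // hypothetical test file under balancer/pickfirst

import (
	"time"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

// stubTimers replaces TimeAfterFunc with an immediate-fire version and
// returns a function that restores the original hook.
func stubTimers() (restore func()) {
	orig := internal.TimeAfterFunc
	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
		f()              // run the callback right away
		return func() {} // cancellation becomes a no-op
	}
	return func() { internal.TimeAfterFunc = orig }
}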
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
similarity index 85%
rename from vendor/google.golang.org/grpc/pickfirst.go
rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 885362661..ea8899818 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,38 +16,53 @@
*
*/
-package grpc
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
import (
"encoding/json"
"errors"
"fmt"
+ rand "math/rand/v2"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
+
+ _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ return
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+var logger = grpclog.Component("pick-first-lb")
+
const (
- // PickFirstBalancerName is the name of the pick_first balancer.
- PickFirstBalancerName = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ // Name is the name of the pick_first balancer.
+ Name = "pick_first"
+ logPrefix = "[pick-first-lb %p] "
)
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
func (pickfirstBuilder) Name() string {
- return PickFirstBalancerName
+ return Name
}
type pfConfig struct {
@@ -93,6 +108,15 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}
+// Shuffler is an interface for shuffling an address list.
+type Shuffler interface {
+ ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+// is the number of elements. swap swaps the elements with indexes i and j.
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// The resolver reported an empty address list. Treat it like an error by
@@ -124,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
- grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for each
@@ -139,13 +163,13 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
- // target do not forwarrd the corresponding correct endpoints down/split
+ // target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
addrs = append([]resolver.Address{}, addrs...)
- grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
}
}
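
The shuffling above follows gRFC A62: the endpoint list is reordered, the addresses inside each endpoint keep their order, and the result is flattened. A standalone sketch of that two-step using the public resolver types (flattenShuffled and the sample addresses are illustrative only):

package main

import (
	"fmt"
	rand "math/rand/v2"

	"google.golang.org/grpc/resolver"
)

// flattenShuffled copies the endpoint list so the caller's slice is untouched,
// shuffles the copy, and then concatenates addresses in endpoint order.
func flattenShuffled(endpoints []resolver.Endpoint) []resolver.Address {
	shuffled := append([]resolver.Endpoint{}, endpoints...)
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
	var addrs []resolver.Address
	for _, ep := range shuffled {
		addrs = append(addrs, ep.Addresses...)
	}
	return addrs
}

func main() {
	eps := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "[::1]:443"}}},
		{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}}},
	}
	for _, a := range flattenShuffled(eps) {
		fmt.Println(a.Addr)
	}
}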
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 000000000..113181e6b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,932 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
+ "google.golang.org/grpc/connectivity"
+ expstats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ // Register as the default pick_first balancer.
+ Name = "pick_first"
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+type (
+ // enableHealthListenerKeyType is a unique key type used in resolver
+ // attributes to indicate whether the health listener usage is enabled.
+ enableHealthListenerKeyType struct{}
+ // managedByPickfirstKeyType is an attribute key type to inform Outlier
+ // Detection that the generic health listener is being used.
+ // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
+ // implementing the dualstack design. This is a hack. Once Dualstack is
+ // completed, outlier detection will stop sending ejection updates through
+ // the connectivity listener.
+ managedByPickfirstKeyType struct{}
+)
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ // Name is the name of the pick_first_leaf balancer.
+ // It is changed to "pick_first" in init() if this balancer is to be
+ // registered as the default pickfirst.
+ Name = "pick_first_leaf"
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "disconnection",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "attempt",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "attempt",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+)
+
+const (
+ // TODO: change to pick-first when this becomes the default pick_first policy.
+ logPrefix = "[pick-first-leaf-lb %p] "
+ // connectionDelayInterval is the time to wait for during the happy eyeballs
+ // pass before starting the next connection attempt.
+ connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+ // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+ // address.
+ ipAddrFamilyUnknown ipAddrFamily = iota
+ ipAddrFamilyV4
+ ipAddrFamilyV6
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ target: bo.Target.String(),
+ metricsRecorder: cc.MetricsRecorder(),
+
+ subConns: resolver.NewAddressMap(),
+ state: connectivity.Connecting,
+ cancelConnectionTimer: func() {},
+ }
+ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+ return b
+}
+
+func (b pickfirstBuilder) Name() string {
+ return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+func EnableHealthListener(state resolver.State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+ return state
+}
+
+// IsManagedByPickfirst returns whether an address belongs to a SubConn
+// managed by the pickfirst LB policy.
+// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
+// outlier_detection via the connectivity listener when using pick_first.
+// Once Dualstack changes are complete, all SubConns will be created by
+// pick_first and outlier detection will only use the health listener for
+// ejection. This hack can then be removed.
+func IsManagedByPickfirst(addr resolver.Address) bool {
+ return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
+}
+
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of endpoints received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ rawConnectivityState connectivity.State
+ // The effective connectivity state based on raw connectivity, health state
+ // and after following sticky TransientFailure behaviour defined in A62.
+ effectiveState connectivity.State
+ lastErr error
+ connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
+ sd := &scData{
+ rawConnectivityState: connectivity.Idle,
+ effectiveState: connectivity.Idle,
+ addr: addr,
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
+}
+
+type pickfirstBalancer struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+ target string
+ metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
+
+ // The mutex is used to ensure synchronization of updates triggered
+ // from the idle picker and the already serialized resolver,
+ // SubConn state updates.
+ mu sync.Mutex
+ // State reported to the channel based on SubConn states and resolver
+ // updates.
+ state connectivity.State
+ // scData for active subconns mapped by address.
+ subConns *resolver.AddressMap
+ addressList addressList
+ firstPass bool
+ numTF int
+ cancelConnectionTimer func()
+ healthCheckingEnabled bool
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received error from the name resolver: %v", err)
+ }
+
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
+ return
+ }
+
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.cancelConnectionTimer()
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
+ cfg, ok := state.BalancerConfig.(pfConfig)
+ if state.BalancerConfig != nil && !ok {
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+ }
+
+ if b.logger.V(2) {
+ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+ }
+
+ var newAddrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ newAddrs = append(newAddrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints not set, process addresses until we migrate resolver
+ // emissions fully to Endpoints. The top channel does wrap emitted
+ // addresses with endpoints, however some balancers such as weighted
+ // target do not forward the corresponding correct endpoints down/split
+ // endpoints properly. Once all balancers correctly forward endpoints
+ // down, can delete this else conditional.
+ newAddrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+ }
+ }
+
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+ newAddrs = interleaveAddresses(newAddrs)
+
+ prevAddr := b.addressList.currentAddress()
+ prevSCData, found := b.subConns.Get(prevAddr)
+ prevAddrsCount := b.addressList.size()
+ isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
+ b.addressList.updateAddrs(newAddrs)
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
+ return nil
+ }
+
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list,
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.forceUpdateConcludedStateLocked(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.startFirstPassLocked()
+ }
+ return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.cancelConnectionTimer()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle {
+ b.startFirstPassLocked()
+ }
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+ b.firstPass = true
+ b.numTF = 0
+ // Reset the connection attempt record for existing SubConns.
+ for _, sd := range b.subConns.Values() {
+ sd.(*scData).connectionFailedInFirstPass = false
+ }
+ b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.(*scData).subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMap()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+ familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+ interleavingOrder := []ipAddrFamily{}
+ for _, addr := range addrs {
+ family := addressFamily(addr.Addr)
+ if _, found := familyAddrsMap[family]; !found {
+ interleavingOrder = append(interleavingOrder, family)
+ }
+ familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+ }
+
+ interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+ for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+ // Some IP types may have fewer addresses than others, so we look for
+ // the next type that has a remaining member to add to the interleaved
+ // list.
+ family := interleavingOrder[curFamilyIdx]
+ remainingMembers := familyAddrsMap[family]
+ if len(remainingMembers) > 0 {
+ interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+ familyAddrsMap[family] = remainingMembers[1:]
+ }
+ }
+
+ return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+ // Parse the IP after removing the port.
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ switch {
+ case ip.Is4() || ip.Is4In6():
+ return ipAddrFamilyV4
+ case ip.Is6():
+ return ipAddrFamilyV6
+ default:
+ return ipAddrFamilyUnknown
+ }
+}
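
To make the interleaveAddresses/addressFamily pair above concrete, here is a standalone sketch of the same RFC 8305 round-robin-by-family idea over plain strings (family and interleave are illustrative helpers, not the vendored functions):

package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family reports "v4", "v6", or "other" for a host:port string, mirroring the
// classification step that precedes interleaving.
func family(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "other"
	}
	ip, err := netip.ParseAddr(host)
	switch {
	case err != nil:
		return "other"
	case ip.Is4() || ip.Is4In6():
		return "v4"
	default:
		return "v6"
	}
}

// interleave alternates between families in first-seen order, one address per
// family per round, as described in RFC 8305 section 4.
func interleave(addrs []string) []string {
	byFamily := map[string][]string{}
	var order []string
	for _, a := range addrs {
		f := family(a)
		if _, ok := byFamily[f]; !ok {
			order = append(order, f)
		}
		byFamily[f] = append(byFamily[f], a)
	}
	var out []string
	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
		f := order[i]
		if rest := byFamily[f]; len(rest) > 0 {
			out = append(out, rest[0])
			byFamily[f] = rest[1:]
		}
	}
	return out
}

func main() {
	in := []string{"[2001:db8::1]:443", "[2001:db8::2]:443", "192.0.2.1:443", "192.0.2.2:443"}
	fmt.Println(interleave(in)) // v6, v4, v6, v4
}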
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMap()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.(*scData).subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConn must be shutdown.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ b.cancelConnectionTimer()
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMap()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ scd := sd.(*scData)
+ switch scd.rawConnectivityState {
+ case connectivity.Idle:
+ scd.subConn.Connect()
+ b.scheduleNextConnectionLocked()
+ return
+ case connectivity.TransientFailure:
+ // The SubConn is being re-used and failed during a previous pass
+ // over the addressList. It has not completed backoff yet.
+ // Mark it as having failed and try the next address.
+ scd.connectionFailedInFirstPass = true
+ lastErr = scd.lastErr
+ continue
+ case connectivity.Connecting:
+ // Wait for the connection attempt to complete or the timer to fire
+ // before attempting the next address.
+ b.scheduleNextConnectionLocked()
+ return
+ default:
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
+ return
+
+ }
+ }
+
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass if possible.
+ b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+ b.cancelConnectionTimer()
+ if !b.addressList.hasNext() {
+ return
+ }
+ curAddr := b.addressList.currentAddress()
+ cancelled := false // Access to this is protected by the balancer's mutex.
+ closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // If the scheduled task is cancelled while acquiring the mutex, return.
+ if cancelled {
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ }
+ })
+ // Access to the cancellation callback held by the balancer is guarded by
+ // the balancer's mutex, so it's safe to set the boolean from the callback.
+ b.cancelConnectionTimer = sync.OnceFunc(func() {
+ cancelled = true
+ closeFn()
+ })
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.rawConnectivityState
+ sd.rawConnectivityState = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ if newState.ConnectivityState == connectivity.Shutdown {
+ sd.effectiveState = connectivity.Shutdown
+ return
+ }
+
+ // Record a connection attempt when exiting CONNECTING.
+ if newState.ConnectivityState == connectivity.TransientFailure {
+ sd.connectionFailedInFirstPass = true
+ connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
+ return
+ }
+ if !b.healthCheckingEnabled {
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+ }
+
+ sd.effectiveState = connectivity.Ready
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+ }
+ // Send a CONNECTING update to take the SubConn out of sticky-TF if
+ // required.
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+ b.updateSubConnHealthState(sd, scs)
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ sd.effectiveState = newState.ConnectivityState
+ // A READY SubConn can appear in between CONNECTING and IDLE; account
+ // for that.
+ if oldState == connectivity.Connecting {
+ // A known issue (https://github.com/grpc/grpc-go/issues/7862)
+ // causes a race that prevents the READY state change notification.
+ // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+ disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+ b.addressList.reset()
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The effective state can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ if sd.effectiveState != connectivity.TransientFailure {
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ sd.effectiveState = connectivity.TransientFailure
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. Happy Eyeballs will also
+ // cause out of order updates to arrive.
+
+ if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ b.cancelConnectionTimer()
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ }
+
+ // End the first pass if we've seen a TRANSIENT_FAILURE from all
+ // SubConns once.
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ }
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+ // An optimization to avoid iterating over the entire SubConn map.
+ if b.addressList.isValid() {
+ return
+ }
+ // Connect() has been called on all the SubConns. The first pass can be
+ // ended if all the SubConns have reported a failure.
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if !sd.connectionFailedInFirstPass {
+ return
+ }
+ }
+ b.firstPass = false
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ case connectivity.TransientFailure:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ default:
+ b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
+ }
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TransientFailures allow the picker to be updated to update
+ // the connectivity error, in all other cases don't send duplicate state
+ // updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't assume that LB policies start in CONNECTING and instead
+// relies on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.exitIdle()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
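
scheduleNextConnectionLocked above arms a 250 ms Happy Eyeballs timer whose callback re-checks a cancelled flag under the balancer mutex before acting. A reduced sketch of that cancellation pattern with hypothetical names (scheduler, scheduleNext), not the vendored implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

type scheduler struct {
	mu     sync.Mutex
	cancel func() // cancels the pending attempt; always non-nil
}

// scheduleNext arms a delayed attempt. The cancelled flag is read and written
// only while holding mu, so a timer that fires after cancellation does nothing.
func (s *scheduler) scheduleNext(delay time.Duration, attempt func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cancel() // drop any previously scheduled attempt
	cancelled := false
	t := time.AfterFunc(delay, func() {
		s.mu.Lock()
		defer s.mu.Unlock()
		if cancelled {
			return
		}
		attempt()
	})
	s.cancel = sync.OnceFunc(func() {
		cancelled = true
		t.Stop()
	})
}

func main() {
	s := &scheduler{cancel: func() {}}
	s.scheduleNext(250*time.Millisecond, func() { fmt.Println("next connection attempt") })
	time.Sleep(300 * time.Millisecond)
	s.mu.Lock()
	s.cancel() // idempotent; safe to call again on the next update
	s.mu.Unlock()
}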
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index f7031ad22..35da5d1ec 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,13 @@
package roundrobin
import (
- "sync/atomic"
+ "fmt"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/endpointsharding"
+ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
// Name is the name of round_robin balancer.
@@ -35,47 +36,44 @@ const Name = "round_robin"
var logger = grpclog.Component("roundrobin")
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
- return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
-}
-
func init() {
- balancer.Register(newBuilder())
+ balancer.Register(builder{})
}
-type rrPickerBuilder struct{}
+type builder struct{}
-func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
- logger.Infof("roundrobinPicker: Build called with info: %v", info)
- if len(info.ReadySCs) == 0 {
- return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
- }
- scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
- for sc := range info.ReadySCs {
- scs = append(scs, sc)
- }
- return &rrPicker{
- subConns: scs,
- // Start at a random index, as the same RR balancer rebuilds a new
- // picker when SubConn states change, and we don't want to apply excess
- // load to the first server in the list.
- next: uint32(grpcrand.Intn(len(scs))),
+func (bb builder) Name() string {
+ return Name
+}
+
+func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ bal := &rrBalancer{
+ cc: cc,
+ Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
}
+ bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
+ bal.logger.Infof("Created")
+ return bal
}
-type rrPicker struct {
- // subConns is the snapshot of the roundrobin balancer when this picker was
- // created. The slice is immutable. Each Get() will do a round robin
- // selection from it and return the selected SubConn.
- subConns []balancer.SubConn
- next uint32
+type rrBalancer struct {
+ balancer.Balancer
+ cc balancer.ClientConn
+ logger *internalgrpclog.PrefixLogger
}
-func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- subConnsLen := uint32(len(p.subConns))
- nextIndex := atomic.AddUint32(&p.next, 1)
+func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
+ return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+ // Enable the health listener in pickfirst children for client side health
+ // checks and outlier detection, if configured.
+ ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ })
+}
- sc := p.subConns[nextIndex%subConnsLen]
- return balancer.PickResult{SubConn: sc}, nil
+func (b *rrBalancer) ExitIdle() {
+ // This should always succeed, as the child is the endpointsharding balancer, which implements ExitIdler.
+ if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
+ ei.ExitIdle()
+ }
}
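
After this rewrite, round_robin manages per-endpoint pick_first children via endpointsharding, but applications still select it by name in the service config. A hedged usage sketch (the target is a placeholder; the blank import is included only to be explicit about registering the balancer, which may already be linked in transitively depending on the grpc version):

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin" via init()
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "round_robin" resolves to the balancer registered by the roundrobin
	// package; the channel then builds one pick_first child per endpoint.
	conn, err := grpc.NewClient(
		"dns:///example.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}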
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
new file mode 100644
index 000000000..9ee44d4af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+ // gRPC checks if currently-connected address is still in the new list.
+ // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will gracefully close, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ //
+ // Deprecated: this method will be removed. Create new SubConns for new
+ // addresses instead.
+ UpdateAddresses([]resolver.Address)
+ // Connect starts connecting this SubConn.
+ Connect()
+ // GetOrBuildProducer returns a reference to the existing Producer for this
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
+ // creates a new one and returns it. Returns a close function which may be
+ // called when the Producer is no longer needed. Otherwise the producer
+ // will automatically be closed upon connection loss or subchannel close.
+ // Should only be called on a SubConn in state Ready. Otherwise the
+ // producer will be unable to create streams.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+ // allowed to complete. No future calls should be made on the SubConn.
+ // One final state update will be delivered to the StateListener (or
+ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+ // indicate the shutdown operation. This may be delivered before
+ // in-progress RPCs are complete and the actual connection is closed.
+ Shutdown()
+ // RegisterHealthListener registers a health listener that receives health
+ // updates for a Ready SubConn. Only one health listener can be registered
+ // at a time. A health listener should be registered each time the SubConn's
+ // connectivity state changes to READY. Registering a health listener when
+ // the connectivity state is not READY may result in undefined behaviour.
+ // This method must not be called synchronously while handling an update
+ // from a previously registered health listener.
+ RegisterHealthListener(func(SubConnState))
+ // EnforceSubConnEmbedding is included to force implementers to embed
+ // another implementation of this interface, allowing gRPC to add methods
+ // without breaking users.
+ internal.EnforceSubConnEmbedding
+}
+
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+ // Build creates a Producer. The first parameter is always a
+ // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+ // associated SubConn), but is declared as `any` to avoid a dependency
+ // cycle. Build also returns a close function that will be called when all
+ // references to the Producer have been given up for a SubConn, or when a
+ // connectivity state change occurs on the SubConn. The close function
+ // should always block until all asynchronous cleanup work is completed.
+ Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+ // ConnectivityState is the connectivity state of the SubConn.
+ ConnectivityState connectivity.State
+ // ConnectionError is set if the ConnectivityState is TransientFailure,
+ // describing the reason the SubConn failed. Otherwise, it is nil.
+ ConnectionError error
+ // connectedAddress contains the connected address when ConnectivityState is
+ // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
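
The SubConn contract documented above (start in IDLE, call Connect to attempt, fall back to IDLE after a failure's backoff or after losing a READY connection) is what LB policies program against. A minimal, hypothetical StateListener fragment that keeps a single SubConn connected (newAutoReconnectSubConn is not part of gRPC, and a real balancer would do much more):

package demo

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// newAutoReconnectSubConn creates one SubConn and re-issues Connect() every
// time it falls back to IDLE, following the state machine described on
// SubConn. This is an illustrative fragment, not a complete balancer.
func newAutoReconnectSubConn(cc balancer.ClientConn, addr resolver.Address) (balancer.SubConn, error) {
	var sc balancer.SubConn
	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
		StateListener: func(s balancer.SubConnState) {
			// IDLE means "not connecting and not connected": kick it again.
			if s.ConnectivityState == connectivity.Idle {
				sc.Connect()
			}
		},
	})
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}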
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index af39b8a4c..948a21ef6 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,11 +24,25 @@ import (
"sync"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+var (
+ setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+ // noOpRegisterHealthListenerFn is used when client side health checking is
+ // disabled. It sends a single READY update on the registered listener.
+ noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+ listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+ return func() {}
+ }
)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
@@ -46,6 +60,7 @@ import (
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
+ internal.EnforceClientConnEmbedding
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
@@ -87,12 +102,16 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
return ccb
}
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+ return ccb.cc.metricsRecorderList
+}
+
// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer. This is always executed from the serializer, so
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ uccs := func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
@@ -107,17 +126,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
- })
- if !ok {
- return nil
}
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the MaybeSchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
return <-errCh
}
// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -133,7 +158,7 @@ func (ccb *ccBalancerWrapper) close() {
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
+ ccb.serializer.TrySchedule(func(context.Context) {
if ccb.balancer == nil {
return
}
@@ -145,7 +170,7 @@ func (ccb *ccBalancerWrapper) close() {
// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -177,12 +202,13 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
ac: ac,
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
stateListener: opts.StateListener,
+ healthData: newHealthData(connectivity.Idle),
}
ac.acbw = acbw
return acbw, nil
}
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}
@@ -198,6 +224,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.cc.mu.Lock()
defer ccb.cc.mu.Unlock()
+ if ccb.cc.conns == nil {
+ // The CC has been closed; ignore this update.
+ return
+ }
ccb.mu.Lock()
if ccb.closed {
@@ -238,25 +268,77 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
+ internal.EnforceSubConnEmbedding
ac *addrConn // read-only
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)
- mu sync.Mutex
- producers map[balancer.ProducerBuilder]*refCountedProducer
+ producersMu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
+
+ // Access to healthData is protected by healthMu.
+ healthMu sync.Mutex
+ // healthData is stored as a pointer to detect when the health listener is
+ // dropped or updated. This is required as closures can't be compared for
+ // equality.
+ healthData *healthData
+}
+
+// healthData holds data related to health state reporting.
+type healthData struct {
+ // connectivityState stores the most recent connectivity state delivered
+ // to the LB policy. This is stored to avoid sending updates when the
+ // SubConn has already exited connectivity state READY.
+ connectivityState connectivity.State
+ // closeHealthProducer stores function to close the ref counted health
+ // producer. The health producer is automatically closed when the SubConn
+ // state changes.
+ closeHealthProducer func()
+}
+
+func newHealthData(s connectivity.State) *healthData {
+ return &healthData{
+ connectivityState: s,
+ closeHealthProducer: func() {},
+ }
}
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
- acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
+ // Invalidate all producers on any state change.
+ acbw.closeProducers()
+
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ // Invalidate the health listener by updating the healthData.
+ acbw.healthMu.Lock()
+ // A race may occur if a health listener is registered soon after the
+ // connectivity state is set but before the stateListener is called.
+ // Two cases may arise:
+ // 1. The new state is not READY: RegisterHealthListener has checks to
+ // ensure no updates are sent when the connectivity state is not
+ // READY.
+ // 2. The new state is READY: This means that the old state wasn't Ready.
+ // The RegisterHealthListener API mentions that a health listener
+ // must not be registered when a SubConn is not ready to avoid such
+ // races. When this happens, the LB policy would get health updates
+ // on the old listener. When the LB policy registers a new listener
+ // on receiving the connectivity update, the health updates will be
+ // sent to the new health listener.
+ acbw.healthData = newHealthData(scs.ConnectivityState)
+ acbw.healthMu.Unlock()
+
+ acbw.stateListener(scs)
})
}
@@ -273,6 +355,7 @@ func (acbw *acBalancerWrapper) Connect() {
}
func (acbw *acBalancerWrapper) Shutdown() {
+ acbw.closeProducers()
acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
@@ -280,9 +363,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport, err := acbw.ac.getTransport(ctx)
- if err != nil {
- return nil, err
+ transport := acbw.ac.getReadyTransport()
+ if transport == nil {
+ return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
@@ -307,15 +391,15 @@ type refCountedProducer struct {
}
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
acbw.producers[pb] = pData
}
// Account for this new reference.
@@ -325,13 +409,112 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
- acbw.mu.Lock()
+ acbw.producersMu.Lock()
+ // If closeProducers has already closed this producer instance, refs is
+ // set to 0, so the check after decrementing will never pass, and the
+ // producer will not be double-closed.
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
- acbw.mu.Unlock()
+ acbw.producersMu.Unlock()
+ }
+ return pData.producer, sync.OnceFunc(unref)
+}
+
+func (acbw *acBalancerWrapper) closeProducers() {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+ for pb, pData := range acbw.producers {
+ pData.refs = 0
+ pData.close()
+ delete(acbw.producers, pb)
}
- return pData.producer, grpcsync.OnceFunc(unref)
+}
+
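As orientation only (not part of the vendored patch): a minimal sketch of a balancer.ProducerBuilder that an LB policy could hand to SubConn.GetOrBuildProducer. The wrapper above ref-counts the result and calls the returned close function when the last reference is released or when closeProducers invalidates all producers on a state change. The pingProducer name and its behavior are hypothetical.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// pingProducer is a hypothetical per-SubConn helper; real producers (for
// example, ORCA out-of-band metric listeners) start streams on cc in the
// background.
type pingProducer struct {
	cc grpc.ClientConnInterface
}

type pingProducerBuilder struct{}

var _ balancer.ProducerBuilder = pingProducerBuilder{}

// Build is called by GetOrBuildProducer the first time this builder is seen
// for a SubConn; later calls reuse the producer and only bump the ref count.
func (pingProducerBuilder) Build(cci any) (balancer.Producer, func()) {
	p := &pingProducer{cc: cci.(grpc.ClientConnInterface)}
	stop := func() {
		// Stop any background work started for p. Invoked when the ref count
		// drops to zero or when closeProducers runs on a state change.
	}
	return p, stop
}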
+// healthProducerRegisterFn is a type alias for the health producer's function
+// for registering listeners.
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
+
+// healthListenerRegFn returns a function to register a listener for health
+// updates. If client side health checks are disabled, the registered listener
+// will get a single READY (raw connectivity state) update.
+//
+// Client side health checking is enabled when all the following
+// conditions are satisfied:
+// 1. Health checking is not disabled using the dial option.
+// 2. The health package is imported.
+// 3. The health check config is present in the service config.
+func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
+ if acbw.ccb.cc.dopts.disableHealthCheck {
+ return noOpRegisterHealthListenerFn
+ }
+ regHealthLisFn := internal.RegisterClientHealthCheckListener
+ if regHealthLisFn == nil {
+ // The health package is not imported.
+ return noOpRegisterHealthListenerFn
+ }
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
+ return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
+ return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
+ }
+}
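As an aside (illustrative, not part of the patch): a sketch of how a client typically satisfies the three conditions listed above. "example.Service" and the insecure credentials are placeholders.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // condition 2: import the health package
)

func newHealthCheckedClient(target string) (*grpc.ClientConn, error) {
	// Condition 3: a healthCheckConfig in the service config (supplied here as
	// the default; it could equally come from the name resolver).
	sc := `{"healthCheckConfig": {"serviceName": "example.Service"}}`
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
		// Condition 1: simply do not pass grpc.WithDisableHealthCheck().
	)
}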
+
+// RegisterHealthListener accepts a health listener from the LB policy. It sends
+// updates to the health listener as long as the SubConn's connectivity state
+// doesn't change and a new health listener is not registered. To invalidate
+// the currently registered health listener, acbw updates the healthData. If a
+// nil listener is registered, the active health listener is dropped.
+func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ acbw.healthData.closeHealthProducer()
+ // listeners should not be registered when the connectivity state
+ // isn't Ready. This may happen when the balancer registers a listener
+ // after the connectivityState is updated, but before it is notified
+ // of the update.
+ if acbw.healthData.connectivityState != connectivity.Ready {
+ return
+ }
+ // Replace the health data to stop sending updates to any previously
+ // registered health listeners.
+ hd := newHealthData(connectivity.Ready)
+ acbw.healthData = hd
+ if listener == nil {
+ return
+ }
+
+ registerFn := acbw.healthListenerRegFn()
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Don't send updates if a new listener is registered.
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ if acbw.healthData != hd {
+ return
+ }
+ // Serialize the health updates from the health producer with
+ // other calls into the LB policy.
+ listenerWrapper := func(scs balancer.SubConnState) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ if acbw.healthData != hd {
+ return
+ }
+ listener(scs)
+ })
+ }
+
+ hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
+ })
}
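To make the contract above concrete (illustrative only, not part of the vendored change): roughly how an LB policy's StateListener might re-register a health listener each time the SubConn reports READY. updatePicker stands in for whatever the policy does with the effective state.

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func makeStateListener(sc balancer.SubConn, updatePicker func(connectivity.State)) func(balancer.SubConnState) {
	return func(scs balancer.SubConnState) {
		if scs.ConnectivityState != connectivity.Ready {
			// Health updates stop automatically on any connectivity change,
			// so only the raw state matters here.
			updatePicker(scs.ConnectivityState)
			return
		}
		// Register only while READY, as required by RegisterHealthListener;
		// the wrapper invalidates this listener on the next state change or
		// when another listener is registered.
		sc.RegisterHealthListener(func(hs balancer.SubConnState) {
			updatePicker(hs.ConnectivityState)
		})
	}
}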
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 1afb1e84a..b2f8fc7f4 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.2
+// protoc-gen-go v1.36.4
+// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@@ -31,6 +31,7 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) {
// Log entry we store in binary logs
type GrpcLogEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The timestamp of the binary log message
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
@@ -255,7 +253,7 @@ type GrpcLogEntry struct {
// The logger uses one of the following fields to record the payload,
// according to the type of the log entry.
//
- // Types that are assignable to Payload:
+ // Types that are valid to be assigned to Payload:
//
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
@@ -269,16 +267,16 @@ type GrpcLogEntry struct {
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
// the case of trailers-only. On server side, peer is always
// logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GrpcLogEntry) Reset() {
*x = GrpcLogEntry{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GrpcLogEntry) String() string {
@@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {}
func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -339,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
return GrpcLogEntry_LOGGER_UNKNOWN
}
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if m != nil {
- return m.Payload
+func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if x != nil {
+ return x.Payload
}
return nil
}
func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
}
return nil
}
func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
}
return nil
}
func (x *GrpcLogEntry) GetMessage() *Message {
- if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
- return x.Message
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
}
return nil
}
func (x *GrpcLogEntry) GetTrailer() *Trailer {
- if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
}
return nil
}
@@ -418,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
type ClientHeader struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The name of the RPC method, which looks something like:
@@ -435,16 +438,16 @@ type ClientHeader struct {
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
// the RPC timeout
- Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ClientHeader) Reset() {
*x = ClientHeader{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ClientHeader) String() string {
@@ -455,7 +458,7 @@ func (*ClientHeader) ProtoMessage() {}
func (x *ClientHeader) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -499,21 +502,18 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration {
}
type ServerHeader struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerHeader) Reset() {
*x = ServerHeader{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServerHeader) String() string {
@@ -524,7 +524,7 @@ func (*ServerHeader) ProtoMessage() {}
func (x *ServerHeader) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -547,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata {
}
type Trailer struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The gRPC status code.
@@ -561,15 +558,15 @@ type Trailer struct {
// The value of the 'grpc-status-details-bin' metadata key. If
// present, this is always an encoded 'google.rpc.Status' message.
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Trailer) Reset() {
*x = Trailer{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Trailer) String() string {
@@ -580,7 +577,7 @@ func (*Trailer) ProtoMessage() {}
func (x *Trailer) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -625,24 +622,21 @@ func (x *Trailer) GetStatusDetails() []byte {
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Length of the message. It may not be the same as the length of the
// data field, as the logging payload can be truncated or omitted.
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
// May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Message) Reset() {
*x = Message{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Message) String() string {
@@ -653,7 +647,7 @@ func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -704,20 +698,17 @@ func (x *Message) GetData() []byte {
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Metadata) Reset() {
*x = Metadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Metadata) String() string {
@@ -728,7 +719,7 @@ func (*Metadata) ProtoMessage() {}
func (x *Metadata) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -752,21 +743,18 @@ func (x *Metadata) GetEntry() []*MetadataEntry {
// A metadata key value pair
type MetadataEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *MetadataEntry) Reset() {
*x = MetadataEntry{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MetadataEntry) String() string {
@@ -777,7 +765,7 @@ func (*MetadataEntry) ProtoMessage() {}
func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -808,23 +796,20 @@ func (x *MetadataEntry) GetValue() []byte {
// Address information
type Address struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
// only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+ IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Address) Reset() {
*x = Address{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Address) String() string {
@@ -835,7 +820,7 @@ func (*Address) ProtoMessage() {}
func (x *Address) ProtoReflect() protoreflect.Message {
mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -873,7 +858,7 @@ func (x *Address) GetIpPort() uint32 {
var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
-var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
@@ -999,23 +984,23 @@ var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-}
+})
var (
file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
- file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+ file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
)
func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
- file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+ file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
})
return file_grpc_binlog_v1_binarylog_proto_rawDescData
}
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@@ -1057,105 +1042,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
if File_grpc_binlog_v1_binarylog_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GrpcLogEntry); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ClientHeader); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerHeader); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Trailer); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Message); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Metadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MetadataEntry); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Address); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
@@ -1165,7 +1052,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
NumEnums: 3,
NumMessages: 8,
NumExtensions: 0,
@@ -1177,7 +1064,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
}.Build()
File_grpc_binlog_v1_binarylog_proto = out.File
- file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
file_grpc_binlog_v1_binarylog_proto_goTypes = nil
file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 2359f94b8..a319ef979 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -31,6 +32,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal"
@@ -38,6 +40,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -72,6 +75,8 @@ var (
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+ // PickFirstBalancerName is the name of the pick_first balancer.
+ PickFirstBalancerName = pickfirst.Name
)
// The following errors are returned from Dial and DialContext
@@ -113,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
// is performed. Use of the ClientConn for RPCs will automatically cause it to
-// connect. Connect may be used to manually create a connection, but for most
-// users this is unnecessary.
+// connect. The Connect method may be called to manually create a connection,
+// but for most users this should be unnecessary.
//
// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns
-// resolver, a "dns:///" prefix should be applied to the target.
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns
+// name resolver, a "dns:///" prefix may be applied to the target. The default
+// name resolver will be used if no scheme is detected, or if the parsed scheme
+// is not a registered name resolver. The default resolver is "dns" but can be
+// overridden using the resolver package's SetDefaultScheme.
+//
+// Examples:
+//
+// - "foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com"
+// - "dns:///10.0.0.213:8080"
+// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
+// - "dns://8.8.8.8/foo.googleapis.com:8080"
+// - "dns://8.8.8.8/foo.googleapis.com"
+// - "zookeeper://zk.example.com:9900/example_service"
//
// The DialOptions returned by WithBlock, WithTimeout,
// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
@@ -152,6 +171,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
for _, opt := range opts {
opt.apply(&cc.dopts)
}
+
+ // Determine the resolver to use.
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+ return nil, err
+ }
+
+ for _, opt := range globalPerTargetDialOptions {
+ opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+ }
+
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
@@ -160,38 +189,32 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
if scpr.Err != nil {
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
}
cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
}
- cc.mkp = cc.dopts.copts.KeepaliveParams
-
- // Register ClientConn with channelz.
- cc.channelzRegistration(target)
-
- // TODO: Ideally it should be impossible to error from this function after
- // channelz registration. This will require removing some channelz logs
- // from the following functions that can error. Errors can be returned to
- // the user, and successful logs can be emitted here, after the checks have
- // passed and channelz is subsequently registered.
+ cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
- // Determine the resolver to use.
- if err := cc.parseTargetAndFindResolver(); err != nil {
- channelz.RemoveEntry(cc.channelz.ID)
- return nil, err
- }
- if err = cc.determineAuthority(); err != nil {
- channelz.RemoveEntry(cc.channelz.ID)
+ if err = cc.initAuthority(); err != nil {
return nil, err
}
+ // Register ClientConn with channelz. Note that this is only done once
+ // channel creation can no longer fail.
+ cc.channelzRegistration(target)
+ channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+ channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
return cc, nil
}
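As a small usage sketch for the target syntax documented on NewClient (not part of the vendored diff; the address and insecure credentials are placeholders):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "dns:///" is optional here because "dns" is the default scheme.
	conn, err := grpc.NewClient("dns:///foo.googleapis.com:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
	conn.Connect() // optional; the first RPC would otherwise trigger the connection lazily
}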
@@ -216,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
// At the end of this method, we kick the channel out of idle, rather than
// waiting for the first rpc.
- opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+ //
+ // The WithLocalDNSResolution dial option ensures that grpc.Dial preserves
+ // its existing behavior: with the default "passthrough" scheme, hostname
+ // resolution is skipped, while with the "dns" scheme, resolution is
+ // performed on the client.
+ opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
cc, err := NewClient(target, opts...)
if err != nil {
return nil, err
@@ -586,13 +614,14 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See parseTargetAndFindResolver().
- authority string // See determineAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
- idlenessMgr *idle.Manager
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -608,7 +637,7 @@ type ClientConn struct {
balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
- mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+ keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
// firstResolveEvent is used to track whether the name resolver sent us at
// least one update. RPCs block on this event. May be accessed without mu
// if we know we cannot be asked to enter idle mode while accessing it (e.g.
@@ -622,11 +651,6 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -641,11 +665,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
@@ -692,8 +711,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
var emptyServiceConfig *ServiceConfig
func init() {
- balancer.Register(pickfirstBuilder{})
- cfg := parseServiceConfig("{}")
+ cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
if cfg.Err != nil {
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
}
@@ -776,10 +794,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
}
}
- var balCfg serviceconfig.LoadBalancingConfig
- if cc.sc != nil && cc.sc.lbConfig != nil {
- balCfg = cc.sc.lbConfig
- }
+ balCfg := cc.sc.lbConfig
bw := cc.balancerWrapper
cc.mu.Unlock()
@@ -809,17 +824,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
cc.csMgr.updateState(connectivity.TransientFailure)
}
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
return out
}
@@ -834,12 +843,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
+ addrs: copyAddresses(addrs),
scopts: opts,
dopts: cc.dopts,
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
@@ -878,7 +886,13 @@ func (cc *ClientConn) Target() string {
return cc.target
}
-// CanonicalTarget returns the canonical target string of the ClientConn.
+// CanonicalTarget returns the canonical target string used when creating cc.
+//
+// This always has the form "://[authority]/". For example:
+//
+// - "dns:///example.com:42"
+// - "dns://8.8.8.8/example.com:42"
+// - "unix:///path/to/socket"
func (cc *ClientConn) CanonicalTarget() string {
return cc.parsedTarget.String()
}
@@ -915,28 +929,29 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ addrs = copyAddresses(addrs)
limit := len(addrs)
if limit > 5 {
limit = 5
@@ -944,7 +959,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
ac.mu.Lock()
- if equalAddresses(ac.addrs, addrs) {
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@@ -963,7 +978,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@@ -989,11 +1004,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
- ac.mu.Unlock()
-
// Since we were connecting/connected, we should start a new connection
// attempt.
- go ac.resetTransport()
+ go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@@ -1149,10 +1162,15 @@ func (cc *ClientConn) Close() error {
<-cc.resolverWrapper.serializer.Done()
<-cc.balancerWrapper.serializer.Done()
-
+ var wg sync.WaitGroup
for ac := range conns {
- ac.tearDown(ErrClientConnClosing)
+ wg.Add(1)
+ go func(ac *addrConn) {
+ defer wg.Done()
+ ac.tearDown(ErrClientConnClosing)
+ }(ac)
}
+ wg.Wait()
cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
@@ -1187,8 +1205,7 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
- stateChan chan struct{} // closed and recreated on every state change.
+ state connectivity.State
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@@ -1201,9 +1218,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
- // When changing states, reset the state change channel.
- close(ac.stateChan)
- ac.stateChan = make(chan struct{})
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1211,7 +1225,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.acbw.updateState(s, lastErr)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1221,15 +1235,17 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
case transport.GoAwayTooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
- if v > ac.cc.mkp.Time {
- ac.cc.mkp.Time = v
+ if v > ac.cc.keepaliveParams.Time {
+ ac.cc.keepaliveParams.Time = v
}
ac.cc.mu.Unlock()
}
}
-func (ac *addrConn) resetTransport() {
- ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@@ -1260,6 +1276,8 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ // TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+ // to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
ac.mu.Lock()
if acCtx.Err() != nil {
@@ -1301,7 +1319,7 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
}
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, and stops at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@@ -1314,7 +1332,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
ac.mu.Lock()
ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
ac.cc.mu.RUnlock()
copts := ac.dopts.copts
@@ -1378,7 +1396,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
defer cancel()
copts.ChannelzParent = ac.channelz
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
+ newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
if err != nil {
if logger.V(2) {
logger.Infof("Creating new client transport to %q: %v", addr, err)
@@ -1452,7 +1470,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
if !ac.scopts.HealthCheckEnabled {
return
}
- healthCheckFunc := ac.cc.dopts.healthCheckFunc
+ healthCheckFunc := internal.HealthCheckFunc
if healthCheckFunc == nil {
// The health package is not imported to set health check function.
//
@@ -1484,7 +1502,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
}
// Start the health checking stream.
go func() {
- err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+ err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
@@ -1513,29 +1531,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status. If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
- for ctx.Err() == nil {
- ac.mu.Lock()
- t, state, sc := ac.transport, ac.state, ac.stateChan
- ac.mu.Unlock()
- if state == connectivity.Ready {
- return t, nil
- }
- if state == connectivity.Shutdown {
- return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
- }
-
- select {
- case <-ctx.Done():
- case <-sc:
- }
- }
- return nil, status.FromContextError(ctx.Err()).Err()
-}
-
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1582,7 +1577,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancelation of cc.ctx.
+ // closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
@@ -1673,22 +1668,19 @@ func (cc *ClientConn) connectionError() error {
return cc.lastConnectionError
}
-// parseTargetAndFindResolver parses the user's dial target and stores the
-// parsed target in `cc.parsedTarget`.
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
//
// The resolver to use is determined based on the scheme in the parsed target
// and the same is stored in `cc.resolverBuilder`.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) parseTargetAndFindResolver() error {
- channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target)
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+ logger.Infof("original dial target is: %q", cc.target)
var rb resolver.Builder
parsedTarget, err := parseTarget(cc.target)
- if err != nil {
- channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err)
- } else {
- channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget)
+ if err == nil {
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
@@ -1707,15 +1699,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
defScheme = resolver.GetDefaultScheme()
}
- channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme)
canonicalTarget := defScheme + ":///" + cc.target
parsedTarget, err = parseTarget(canonicalTarget)
if err != nil {
- channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err)
return err
}
- channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
@@ -1805,7 +1794,7 @@ func encodeAuthority(authority string) string {
// credentials do not match the authority configured through the dial option.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) determineAuthority() error {
+func (cc *ClientConn) initAuthority() error {
dopts := cc.dopts
// Historically, we had two options for users to specify the serverName or
// authority for a channel. One was through the transport credentials
@@ -1838,6 +1827,5 @@ func (cc *ClientConn) determineAuthority() error {
} else {
cc.authority = encodeAuthority(endpoint)
}
- channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
return nil
}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd4..959c2f99d 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if a
+// codec is registered there, wraps it with newCodecV1Bridge to turn it into an
+// encoding.CodecV2. Otherwise it returns the codec from the V2 registry via
+// encoding.GetCodecV2, or nil if no codec is registered under that name.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.SliceBuffer(data)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
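For context (illustrative, not part of the patch): a legacy V1 encoding.Codec registered the usual way is picked up by getCodec above and wrapped in codecV1Bridge, so the rest of the stack can keep working in terms of mem.BufferSlice. The "rawbytes" name and pass-through behavior are made up for the example.

package rawcodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
)

// rawCodec is a pass-through codec for []byte payloads.
type rawCodec struct{}

func (rawCodec) Marshal(v any) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v any) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) Name() string { return "rawbytes" }

func init() {
	// The V1 registry; getCodec consults it first and bridges the result to
	// encoding.CodecV2.
	encoding.RegisterCodec(rawCodec{})
}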
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
index 43726e877..7e4bfee88 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {
// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
// The key argument should be 44 bytes, the first 32 bytes are used as a key
-// for HKDF-expand and the remainining 12 bytes are used as a random mask for
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
// the counter.
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
k := len(key)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
index 6a9035ea2..b5bbb5497 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
@@ -51,7 +51,7 @@ type aes128gcmRekey struct {
// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
-// are used as a key for HKDF-expand and the remainining 12 bytes are used
+// are used as a key for HKDF-expand and the remaining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
index 0d64fb37a..f1ea7bb20 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
@@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) {
}
return n, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
index 6c867dd85..50721f690 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -128,7 +128,7 @@ type altsHandshaker struct {
// NewClientHandshaker creates a core.Handshaker that performs a client-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
return &altsHandshaker{
stream: nil,
conn: c,
@@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
// NewServerHandshaker creates a core.Handshaker that performs a server-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
return &altsHandshaker{
stream: nil,
conn: c,
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
index e1cdafb98..fbfde5d04 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -34,8 +34,6 @@ var (
// to a corresponding connection to a hypervisor handshaker service
// instance.
hsConnMap = make(map[string]*grpc.ClientConn)
- // hsDialer will be reassigned in tests.
- hsDialer = grpc.Dial
)
// Dial dials the handshake service in the hypervisor. If a connection has
@@ -49,8 +47,10 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) {
if !ok {
// Create a new connection to the handshaker service. Note that
// this connection stays open until the application is closed.
+ // Disable the service config to avoid unnecessary TXT record lookups that
+ // cause timeouts with some versions of systemd-resolved.
var err error
- hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDisableServiceConfig())
if err != nil {
return nil, err
}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
index fe4488a95..ac9ed4f05 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.2
+// protoc-gen-go v1.36.4
+// protoc v5.27.1
// source: grpc/gcp/altscontext.proto
package grpc_gcp
@@ -28,6 +28,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -38,10 +39,7 @@ const (
)
type AltsContext struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The application protocol negotiated for this connection.
ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
// The record protocol negotiated for this connection.
@@ -55,16 +53,16 @@ type AltsContext struct {
// The RPC protocol versions supported by the peer.
PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"`
// Additional attributes of the peer.
- PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AltsContext) Reset() {
*x = AltsContext{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AltsContext) String() string {
@@ -75,7 +73,7 @@ func (*AltsContext) ProtoMessage() {}
func (x *AltsContext) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -141,7 +139,7 @@ func (x *AltsContext) GetPeerAttributes() map[string]string {
var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor
-var file_grpc_gcp_altscontext_proto_rawDesc = []byte{
+var file_grpc_gcp_altscontext_proto_rawDesc = string([]byte{
0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63,
0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72,
0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70,
@@ -186,22 +184,22 @@ var file_grpc_gcp_altscontext_proto_rawDesc = []byte{
0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67,
0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once
- file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc
+ file_grpc_gcp_altscontext_proto_rawDescData []byte
)
func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte {
file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() {
- file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData)
+ file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_gcp_altscontext_proto_rawDesc), len(file_grpc_gcp_altscontext_proto_rawDesc)))
})
return file_grpc_gcp_altscontext_proto_rawDescData
}
var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{
+var file_grpc_gcp_altscontext_proto_goTypes = []any{
(*AltsContext)(nil), // 0: grpc.gcp.AltsContext
nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry
(SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel
@@ -224,25 +222,11 @@ func file_grpc_gcp_altscontext_proto_init() {
return
}
file_grpc_gcp_transport_security_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AltsContext); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_gcp_altscontext_proto_rawDesc), len(file_grpc_gcp_altscontext_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
@@ -253,7 +237,6 @@ func file_grpc_gcp_altscontext_proto_init() {
MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes,
}.Build()
File_grpc_gcp_altscontext_proto = out.File
- file_grpc_gcp_altscontext_proto_rawDesc = nil
file_grpc_gcp_altscontext_proto_goTypes = nil
file_grpc_gcp_altscontext_proto_depIdxs = nil
}
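
The regenerated file now keeps the raw descriptor as a string and hands protoimpl a byte view built with unsafe.Slice(unsafe.StringData(...), len(...)). That is the standard zero-copy string-to-[]byte conversion; a small self-contained illustration of the pattern follows (identifiers and the sample bytes are made up for the example):

package main

import (
	"fmt"
	"unsafe"
)

// bytesView returns a []byte aliasing s without copying. The result must be
// treated as read-only, because it shares the string's backing memory — the
// same contract the generated code relies on when passing the descriptor to
// protoimpl.TypeBuilder.
func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	rawDesc := "\x0a\x1agrpc/gcp/altscontext.proto" // illustrative prefix only
	view := bytesView(rawDesc)
	fmt.Printf("descriptor view: %d bytes, first byte 0x%02x\n", len(view), view[0])
}

The same pattern appears again in handshaker.pb.go below.
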
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
index adbad6b2f..1caa2638c 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.2
+// protoc-gen-go v1.36.4
+// protoc v5.27.1
// source: grpc/gcp/handshaker.proto
package grpc_gcp
@@ -28,6 +28,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -139,26 +140,23 @@ func (NetworkProtocol) EnumDescriptor() ([]byte, []int) {
}
type Endpoint struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// IP address. It should contain an IPv4 or IPv6 string literal, e.g.
// "192.168.0.1" or "2001:db8::1".
IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
// Port number.
Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
// Network protocol (e.g., TCP, UDP) associated with this endpoint.
- Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"`
+ Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Endpoint) Reset() {
*x = Endpoint{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Endpoint) String() string {
@@ -169,7 +167,7 @@ func (*Endpoint) ProtoMessage() {}
func (x *Endpoint) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -206,26 +204,23 @@ func (x *Endpoint) GetProtocol() NetworkProtocol {
}
type Identity struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to IdentityOneof:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to IdentityOneof:
//
// *Identity_ServiceAccount
// *Identity_Hostname
IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"`
// Additional attributes of the identity.
- Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Identity) Reset() {
*x = Identity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Identity) String() string {
@@ -236,7 +231,7 @@ func (*Identity) ProtoMessage() {}
func (x *Identity) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -251,23 +246,27 @@ func (*Identity) Descriptor() ([]byte, []int) {
return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1}
}
-func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof {
- if m != nil {
- return m.IdentityOneof
+func (x *Identity) GetIdentityOneof() isIdentity_IdentityOneof {
+ if x != nil {
+ return x.IdentityOneof
}
return nil
}
func (x *Identity) GetServiceAccount() string {
- if x, ok := x.GetIdentityOneof().(*Identity_ServiceAccount); ok {
- return x.ServiceAccount
+ if x != nil {
+ if x, ok := x.IdentityOneof.(*Identity_ServiceAccount); ok {
+ return x.ServiceAccount
+ }
}
return ""
}
func (x *Identity) GetHostname() string {
- if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok {
- return x.Hostname
+ if x != nil {
+ if x, ok := x.IdentityOneof.(*Identity_Hostname); ok {
+ return x.Hostname
+ }
}
return ""
}
@@ -298,10 +297,7 @@ func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {}
func (*Identity_Hostname) isIdentity_IdentityOneof() {}
type StartClientHandshakeReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Handshake security protocol requested by the client.
HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"`
// The application protocols supported by the client, e.g., "h2" (for http2),
@@ -335,16 +331,16 @@ type StartClientHandshakeReq struct {
// ALTS connections. The access token that should be used to authenticate to
// the peer. The access token MUST be strongly bound to the ALTS credentials
// used to establish the connection that the token is sent over.
- AccessToken string `protobuf:"bytes,11,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"`
+ AccessToken string `protobuf:"bytes,11,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartClientHandshakeReq) Reset() {
*x = StartClientHandshakeReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StartClientHandshakeReq) String() string {
@@ -355,7 +351,7 @@ func (*StartClientHandshakeReq) ProtoMessage() {}
func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -448,10 +444,7 @@ func (x *StartClientHandshakeReq) GetAccessToken() string {
}
type ServerHandshakeParameters struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The record protocols supported by the server, e.g.,
// "ALTSRP_GCM_AES128".
RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"`
@@ -462,16 +455,16 @@ type ServerHandshakeParameters struct {
// ALTS connections. The token should be used to authenticate to
// the peer. The token MUST be strongly bound to the ALTS credentials
// used to establish the connection that the token is sent over.
- Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"`
+ Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerHandshakeParameters) Reset() {
*x = ServerHandshakeParameters{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServerHandshakeParameters) String() string {
@@ -482,7 +475,7 @@ func (*ServerHandshakeParameters) ProtoMessage() {}
func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -519,10 +512,7 @@ func (x *ServerHandshakeParameters) GetToken() string {
}
type StartServerHandshakeReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The application protocols supported by the server, e.g., "h2" (for http2),
// "grpc".
ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
@@ -531,9 +521,9 @@ type StartServerHandshakeReq struct {
// protocol (e.g., TLS or ALTS) has its own set of record protocols and local
// identities. Since protobuf does not support enum as key to the map, the key
// to handshake_parameters is the integer value of HandshakeProtocol enum.
- HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
- // that the peer's out_frames are split into multiple HandshakReq messages.
+ // that the peer's out_frames are split into multiple HandshakeReq messages.
InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
// (Optional) Local endpoint information of the connection to the client,
// such as local IP address, port number, and network protocol.
@@ -544,16 +534,16 @@ type StartServerHandshakeReq struct {
// (Optional) RPC protocol versions supported by the server.
RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"`
// (Optional) Maximum frame size supported by the server.
- MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+ MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartServerHandshakeReq) Reset() {
*x = StartServerHandshakeReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StartServerHandshakeReq) String() string {
@@ -564,7 +554,7 @@ func (*StartServerHandshakeReq) ProtoMessage() {}
func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -629,10 +619,7 @@ func (x *StartServerHandshakeReq) GetMaxFrameSize() uint32 {
}
type NextHandshakeMessageReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
// that the peer's out_frames are split into multiple NextHandshakerMessageReq
// messages.
@@ -641,15 +628,15 @@ type NextHandshakeMessageReq struct {
// message to the peer and when the application received the current handshake
// message (in the in_bytes field) from the peer.
NetworkLatencyMs uint32 `protobuf:"varint,2,opt,name=network_latency_ms,json=networkLatencyMs,proto3" json:"network_latency_ms,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *NextHandshakeMessageReq) Reset() {
*x = NextHandshakeMessageReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *NextHandshakeMessageReq) String() string {
@@ -660,7 +647,7 @@ func (*NextHandshakeMessageReq) ProtoMessage() {}
func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -690,25 +677,22 @@ func (x *NextHandshakeMessageReq) GetNetworkLatencyMs() uint32 {
}
type HandshakerReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to ReqOneof:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to ReqOneof:
//
// *HandshakerReq_ClientStart
// *HandshakerReq_ServerStart
// *HandshakerReq_Next
- ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"`
+ ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HandshakerReq) Reset() {
*x = HandshakerReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HandshakerReq) String() string {
@@ -719,7 +703,7 @@ func (*HandshakerReq) ProtoMessage() {}
func (x *HandshakerReq) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -734,30 +718,36 @@ func (*HandshakerReq) Descriptor() ([]byte, []int) {
return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{6}
}
-func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof {
- if m != nil {
- return m.ReqOneof
+func (x *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof {
+ if x != nil {
+ return x.ReqOneof
}
return nil
}
func (x *HandshakerReq) GetClientStart() *StartClientHandshakeReq {
- if x, ok := x.GetReqOneof().(*HandshakerReq_ClientStart); ok {
- return x.ClientStart
+ if x != nil {
+ if x, ok := x.ReqOneof.(*HandshakerReq_ClientStart); ok {
+ return x.ClientStart
+ }
}
return nil
}
func (x *HandshakerReq) GetServerStart() *StartServerHandshakeReq {
- if x, ok := x.GetReqOneof().(*HandshakerReq_ServerStart); ok {
- return x.ServerStart
+ if x != nil {
+ if x, ok := x.ReqOneof.(*HandshakerReq_ServerStart); ok {
+ return x.ServerStart
+ }
}
return nil
}
func (x *HandshakerReq) GetNext() *NextHandshakeMessageReq {
- if x, ok := x.GetReqOneof().(*HandshakerReq_Next); ok {
- return x.Next
+ if x != nil {
+ if x, ok := x.ReqOneof.(*HandshakerReq_Next); ok {
+ return x.Next
+ }
}
return nil
}
@@ -788,10 +778,7 @@ func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {}
func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {}
type HandshakerResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The application protocol negotiated for this connection.
ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
// The record protocol negotiated for this connection.
@@ -811,16 +798,16 @@ type HandshakerResult struct {
// The RPC protocol versions supported by the peer.
PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"`
// The maximum frame size of the peer.
- MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+ MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HandshakerResult) Reset() {
*x = HandshakerResult{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HandshakerResult) String() string {
@@ -831,7 +818,7 @@ func (*HandshakerResult) ProtoMessage() {}
func (x *HandshakerResult) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -903,23 +890,20 @@ func (x *HandshakerResult) GetMaxFrameSize() uint32 {
}
type HandshakerStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The status code. This could be the gRPC status code.
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// The status details.
- Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
+ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HandshakerStatus) Reset() {
*x = HandshakerStatus{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HandshakerStatus) String() string {
@@ -930,7 +914,7 @@ func (*HandshakerStatus) ProtoMessage() {}
func (x *HandshakerStatus) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -960,10 +944,7 @@ func (x *HandshakerStatus) GetDetails() string {
}
type HandshakerResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Frames to be given to the peer for the NextHandshakeMessageReq. May be
// empty if no out_frames have to be sent to the peer or if in_bytes in the
// HandshakerReq are incomplete. All the non-empty out frames must be sent to
@@ -978,16 +959,16 @@ type HandshakerResp struct {
// to frames that needs to be forwarded to the peer.
Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"`
// Status of the handshaker.
- Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+ Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HandshakerResp) Reset() {
*x = HandshakerResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *HandshakerResp) String() string {
@@ -998,7 +979,7 @@ func (*HandshakerResp) ProtoMessage() {}
func (x *HandshakerResp) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1043,7 +1024,7 @@ func (x *HandshakerResp) GetStatus() *HandshakerStatus {
var File_grpc_gcp_handshaker_proto protoreflect.FileDescriptor
-var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
+var file_grpc_gcp_handshaker_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x73,
0x68, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70,
0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f,
@@ -1071,7 +1052,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10,
0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
- 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b,
0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
@@ -1108,156 +1089,157 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{
0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69,
0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61,
- 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72,
- 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72,
- 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
- 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65,
- 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74,
+ 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01,
+ 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf,
+ 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15,
+ 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52,
- 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
- 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64,
- 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f,
- 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f,
- 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
- 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74,
- 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63,
- 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b,
- 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
- 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20,
+ 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e,
+ 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
+ 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45,
+ 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e,
+ 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d,
+ 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48,
+ 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68,
+ 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c,
+ 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63,
+ 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a,
+ 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46,
+ 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73,
+ 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37,
+ 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64,
+ 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48,
+ 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
+ 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61,
+ 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65,
+ 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e,
+ 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72,
+ 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d,
+ 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
- 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74,
- 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62,
- 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f,
- 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42,
- 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
- 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
- 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79,
- 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
- 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73,
- 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70,
- 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
- 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53,
- 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65,
- 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a,
- 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48,
- 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12,
- 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61,
- 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
- 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b,
- 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b,
- 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12,
- 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
- 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65,
- 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e,
- 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72,
- 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73,
- 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72,
- 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73,
- 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63,
- 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
- 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a,
- 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d,
- 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06,
- 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63,
- 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12,
- 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f,
- 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04,
- 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
- 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54,
- 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e,
- 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54,
- 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a,
- 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e,
- 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70,
- 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72,
- 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f,
- 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63,
- 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
- 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+ 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74,
+ 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72,
+ 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46,
+ 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06,
+ 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64,
+ 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e,
+ 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02,
+ 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50,
+ 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07,
+ 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73,
+ 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b,
+ 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65,
+ 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
+ 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
+ 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48,
+ 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+ 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63,
+ 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
var (
file_grpc_gcp_handshaker_proto_rawDescOnce sync.Once
- file_grpc_gcp_handshaker_proto_rawDescData = file_grpc_gcp_handshaker_proto_rawDesc
+ file_grpc_gcp_handshaker_proto_rawDescData []byte
)
func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte {
file_grpc_gcp_handshaker_proto_rawDescOnce.Do(func() {
- file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_handshaker_proto_rawDescData)
+ file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_gcp_handshaker_proto_rawDesc), len(file_grpc_gcp_handshaker_proto_rawDesc)))
})
return file_grpc_gcp_handshaker_proto_rawDescData
}
var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
-var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{
+var file_grpc_gcp_handshaker_proto_goTypes = []any{
(HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol
(NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol
(*Endpoint)(nil), // 2: grpc.gcp.Endpoint
@@ -1312,134 +1294,12 @@ func file_grpc_gcp_handshaker_proto_init() {
return
}
file_grpc_gcp_transport_security_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Endpoint); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Identity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartClientHandshakeReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerHandshakeParameters); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartServerHandshakeReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NextHandshakeMessageReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HandshakerReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HandshakerResult); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HandshakerStatus); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HandshakerResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{
(*Identity_ServiceAccount)(nil),
(*Identity_Hostname)(nil),
}
- file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{}
- file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{
+ file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{}
+ file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{
(*HandshakerReq_ClientStart)(nil),
(*HandshakerReq_ServerStart)(nil),
(*HandshakerReq_Next)(nil),
@@ -1448,7 +1308,7 @@ func file_grpc_gcp_handshaker_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_gcp_handshaker_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_gcp_handshaker_proto_rawDesc), len(file_grpc_gcp_handshaker_proto_rawDesc)),
NumEnums: 2,
NumMessages: 12,
NumExtensions: 0,
@@ -1460,7 +1320,6 @@ func file_grpc_gcp_handshaker_proto_init() {
MessageInfos: file_grpc_gcp_handshaker_proto_msgTypes,
}.Build()
File_grpc_gcp_handshaker_proto = out.File
- file_grpc_gcp_handshaker_proto_rawDesc = nil
file_grpc_gcp_handshaker_proto_goTypes = nil
file_grpc_gcp_handshaker_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
index d1af55260..34443b1d2 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/gcp/handshaker.proto
package grpc_gcp
@@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe
// HandshakerServiceServer is the server API for HandshakerService service.
// All implementations must embed UnimplementedHandshakerServiceServer
-// for forward compatibility
+// for forward compatibility.
type HandshakerServiceServer interface {
// Handshaker service accepts a stream of handshaker request, returning a
// stream of handshaker response. Client is expected to send exactly one
@@ -87,14 +87,18 @@ type HandshakerServiceServer interface {
mustEmbedUnimplementedHandshakerServiceServer()
}
-// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedHandshakerServiceServer struct {
-}
+// UnimplementedHandshakerServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHandshakerServiceServer struct{}
func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error {
return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented")
}
func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {}
+func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {}
// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HandshakerServiceServer will
@@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface {
}
func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) {
+ // If the following call panics, it indicates UnimplementedHandshakerServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&HandshakerService_ServiceDesc, srv)
}
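
Note: the regenerated stub (protoc-gen-go-grpc v1.5.1) adds a registration-time check that the Unimplemented server struct was embedded by value rather than by nil pointer. A minimal, self-contained sketch of that mechanism, using invented Foo names instead of the generated grpc_gcp package:

package main

import "fmt"

// UnimplementedFooServer mimics the struct emitted by protoc-gen-go-grpc
// v1.5.x: a zero-size struct with a value-receiver marker method.
type UnimplementedFooServer struct{}

func (UnimplementedFooServer) testEmbeddedByValue() {}

// fooServer embeds the Unimplemented type by value, as the generated
// comment recommends. Embedding *UnimplementedFooServer (a nil pointer)
// would make the promoted call below panic at registration time instead
// of on the first unimplemented RPC.
type fooServer struct {
	UnimplementedFooServer
}

// register mirrors the check added to RegisterHandshakerServiceServer.
func register(srv any) {
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	fmt.Println("registered: embedding checked at init time")
}

func main() {
	register(&fooServer{})
}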
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
index d65ffe6e7..7c533bd6c 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.2
+// protoc-gen-go v1.36.4
+// protoc v5.27.1
// source: grpc/gcp/transport_security_common.proto
package grpc_gcp
@@ -28,6 +28,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
const (
@@ -90,23 +91,20 @@ func (SecurityLevel) EnumDescriptor() ([]byte, []int) {
// Max and min supported RPC protocol versions.
type RpcProtocolVersions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Maximum supported RPC version.
MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"`
// Minimum supported RPC version.
MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RpcProtocolVersions) Reset() {
*x = RpcProtocolVersions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RpcProtocolVersions) String() string {
@@ -117,7 +115,7 @@ func (*RpcProtocolVersions) ProtoMessage() {}
func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -148,21 +146,18 @@ func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version {
// RPC version contains a major version and a minor version.
type RpcProtocolVersions_Version struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
- Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *RpcProtocolVersions_Version) Reset() {
*x = RpcProtocolVersions_Version{}
- if protoimpl.UnsafeEnabled {
- mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RpcProtocolVersions_Version) String() string {
@@ -173,7 +168,7 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {}
func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message {
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -204,7 +199,7 @@ func (x *RpcProtocolVersions_Version) GetMinor() uint32 {
var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor
-var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{
+var file_grpc_gcp_transport_security_common_proto_rawDesc = string([]byte{
0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73,
0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63,
@@ -237,23 +232,23 @@ var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{
0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once
- file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc
+ file_grpc_gcp_transport_security_common_proto_rawDescData []byte
)
func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte {
file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() {
- file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData)
+ file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_gcp_transport_security_common_proto_rawDesc), len(file_grpc_gcp_transport_security_common_proto_rawDesc)))
})
return file_grpc_gcp_transport_security_common_proto_rawDescData
}
var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{
+var file_grpc_gcp_transport_security_common_proto_goTypes = []any{
(SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel
(*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions
(*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version
@@ -273,37 +268,11 @@ func file_grpc_gcp_transport_security_common_proto_init() {
if File_grpc_gcp_transport_security_common_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RpcProtocolVersions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RpcProtocolVersions_Version); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_gcp_transport_security_common_proto_rawDesc), len(file_grpc_gcp_transport_security_common_proto_rawDesc)),
NumEnums: 1,
NumMessages: 2,
NumExtensions: 0,
@@ -315,7 +284,6 @@ func file_grpc_gcp_transport_security_common_proto_init() {
MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes,
}.Build()
File_grpc_gcp_transport_security_common_proto = out.File
- file_grpc_gcp_transport_security_common_proto_rawDesc = nil
file_grpc_gcp_transport_security_common_proto_goTypes = nil
file_grpc_gcp_transport_security_common_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go
index fbdf7dc29..5a9c9461f 100644
--- a/vendor/google.golang.org/grpc/credentials/google/google.go
+++ b/vendor/google.golang.org/grpc/credentials/google/google.go
@@ -22,7 +22,6 @@ package google
import (
"context"
"fmt"
- "time"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/alts"
@@ -31,7 +30,7 @@ import (
"google.golang.org/grpc/internal"
)
-const tokenRequestTimeout = 30 * time.Second
+const defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
var logger = grpclog.Component("credentials")
@@ -39,6 +38,9 @@ var logger = grpclog.Component("credentials")
type DefaultCredentialsOptions struct {
// PerRPCCreds is a per RPC credentials that is passed to a bundle.
PerRPCCreds credentials.PerRPCCredentials
+ // ALTSPerRPCCreds is a per RPC credentials that, if specified, will
+	// supersede PerRPCCreds above for, and only for, ALTS connections.
+ ALTSPerRPCCreds credentials.PerRPCCredentials
}
// NewDefaultCredentialsWithOptions returns a credentials bundle that is
@@ -47,14 +49,21 @@ type DefaultCredentialsOptions struct {
// This API is experimental.
func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle {
if opts.PerRPCCreds == nil {
- ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout)
- defer cancel()
var err error
- opts.PerRPCCreds, err = newADC(ctx)
+ // If the ADC ends up being Compute Engine Credentials, this context
+ // won't be used. Otherwise, the context dictates all the subsequent
+ // token requests via HTTP. So we cannot have any deadline or timeout.
+ opts.PerRPCCreds, err = newADC(context.TODO())
if err != nil {
logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err)
}
}
+ if opts.ALTSPerRPCCreds != nil {
+ opts.PerRPCCreds = &dualPerRPCCreds{
+ perRPCCreds: opts.PerRPCCreds,
+ altsPerRPCCreds: opts.ALTSPerRPCCreds,
+ }
+ }
c := &creds{opts: opts}
bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
if err != nil {
@@ -113,7 +122,7 @@ var (
return alts.NewClientCreds(alts.DefaultClientOptions())
}
newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) {
- return oauth.NewApplicationDefault(ctx)
+ return oauth.NewApplicationDefault(ctx, defaultCloudPlatformScope)
}
)
@@ -143,3 +152,27 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) {
return newCreds, nil
}
+
+// dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the
+// fallback PerRPCCredentials and the ALTS one. It picks one of them based on
+// the channel type.
+type dualPerRPCCreds struct {
+ perRPCCreds credentials.PerRPCCredentials
+ altsPerRPCCreds credentials.PerRPCCredentials
+}
+
+func (d *dualPerRPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ ri, ok := credentials.RequestInfoFromContext(ctx)
+ if !ok {
+ return nil, fmt.Errorf("request info not found from context")
+ }
+ if authType := ri.AuthInfo.AuthType(); authType == "alts" {
+ return d.altsPerRPCCreds.GetRequestMetadata(ctx, uri...)
+ }
+ // This ensures backward compatibility even if authType is not "tls".
+ return d.perRPCCreds.GetRequestMetadata(ctx, uri...)
+}
+
+func (d *dualPerRPCCreds) RequireTransportSecurity() bool {
+ return d.altsPerRPCCreds.RequireTransportSecurity() || d.perRPCCreds.RequireTransportSecurity()
+}
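
Note: a short sketch of wiring in the new ALTSPerRPCCreds option. The token, target, and surrounding setup are placeholders assumed for illustration; they are not taken from this repository. On ALTS connections the bundle's dualPerRPCCreds wrapper prefers the ALTS credential, and falls back to the ADC-based one otherwise.

package main

import (
	"golang.org/x/oauth2"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/google"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Placeholder static token standing in for an ALTS-specific credential.
	altsOnly := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: "placeholder-token"},
	)}

	bundle := google.NewDefaultCredentialsWithOptions(google.DefaultCredentialsOptions{
		ALTSPerRPCCreds: altsOnly,
	})

	conn, err := grpc.NewClient("dns:///example.googleapis.com:443",
		grpc.WithCredentialsBundle(bundle))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}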
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 82bee1443..4c805c644 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
-func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
index d475cbc08..328b838ed 100644
--- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -38,7 +38,7 @@ type TokenSource struct {
}
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
token, err := ts.Token()
if err != nil {
return nil, err
@@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
return oauthAccess{token: *token}
}
-func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
ri, _ := credentials.RequestInfoFromContext(ctx)
if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err)
@@ -156,7 +156,7 @@ type serviceAccount struct {
t *oauth2.Token
}
-func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
s.mu.Lock()
defer s.mu.Unlock()
if !s.t.Valid() {
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 5dafd34ed..bd5fe22b6 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -27,9 +27,15 @@ import (
"net/url"
"os"
+ "google.golang.org/grpc/grpclog"
credinternal "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/envconfig"
)
+const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
+
+var logger = grpclog.Component("credentials")
+
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@@ -112,6 +118,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}
+
+ // The negotiated protocol can be either of the following:
+ // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+ // it is the only protocol advertised by the client during the handshake.
+ // The tls library ensures that the server chooses a protocol advertised
+ // by the client.
+ // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+ // for using HTTP/2 over TLS. We can terminate the connection immediately.
+ np := conn.ConnectionState().NegotiatedProtocol
+ if np == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+ }
+ logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+ }
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
@@ -131,8 +153,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
+ cs := conn.ConnectionState()
+ // The negotiated application protocol can be empty only if the client doesn't
+ // support ALPN. In such cases, we can close the connection since ALPN is required
+ // for using HTTP/2 over TLS.
+ if cs.NegotiatedProtocol == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+ } else if logger.V(2) {
+ logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+ }
+ }
tlsInfo := TLSInfo{
- State: conn.ConnectionState(),
+ State: cs,
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
@@ -168,25 +202,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
- tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+ config := applyDefaults(c)
+ if config.GetConfigForClient != nil {
+ oldFn := config.GetConfigForClient
+ config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+ cfgForClient, err := oldFn(hello)
+ if err != nil || cfgForClient == nil {
+ return cfgForClient, err
+ }
+ return applyDefaults(cfgForClient), nil
+ }
+ }
+ return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+ config := credinternal.CloneTLSConfig(c)
+ config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
// If the user did not configure a MinVersion and did not configure a
// MaxVersion < 1.2, use MinVersion=1.2, which is required by
// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
- if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
- tc.config.MinVersion = tls.VersionTLS12
+ if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+ config.MinVersion = tls.VersionTLS12
}
// If the user did not configure CipherSuites, use all "secure" cipher
// suites reported by the TLS package, but remove some explicitly forbidden
// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
- if tc.config.CipherSuites == nil {
+ if config.CipherSuites == nil {
for _, cs := range tls.CipherSuites() {
if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
- tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+ config.CipherSuites = append(config.CipherSuites, cs.ID)
}
}
}
- return tc
+ return config
}
// NewClientTLSFromCert constructs TLS credentials from the provided root
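
Note: with this version both handshake paths reject peers that do not negotiate an ALPN protocol unless the envconfig.EnforceALPNEnabled switch is turned off. A small sketch, with illustrative config values, showing that credentials.NewTLS appends "h2" to NextProtos on the client side and what a hand-rolled TLS server must advertise to keep working:

package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc/credentials"
)

func main() {
	// credentials.NewTLS clones the config and appends "h2" to NextProtos,
	// so the client advertises HTTP/2 during the ALPN exchange.
	creds := credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})
	fmt.Println(creds.Info().SecurityProtocol) // "tls"

	// A hand-rolled TLS server dialed by gRPC clients must advertise "h2"
	// itself, or ClientHandshake above fails with the ALPN help message.
	serverCfg := &tls.Config{
		NextProtos: []string{"h2"},
		// Certificates omitted in this sketch.
	}
	_ = serverCfg
}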
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 00273702b..405a2ffeb 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -21,6 +21,7 @@ package grpc
import (
"context"
"net"
+ "net/url"
"time"
"google.golang.org/grpc/backoff"
@@ -32,10 +33,16 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
+const (
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+ defaultMaxCallAttempts = 5
+)
+
func init() {
internal.AddGlobalDialOptions = func(opt ...DialOption) {
globalDialOptions = append(globalDialOptions, opt...)
@@ -43,10 +50,18 @@ func init() {
internal.ClearGlobalDialOptions = func() {
globalDialOptions = nil
}
+ internal.AddGlobalPerTargetDialOptions = func(opt any) {
+ if ptdo, ok := opt.(perTargetDialOption); ok {
+ globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+ }
+ }
+ internal.ClearGlobalPerTargetDialOptions = func() {
+ globalPerTargetDialOptions = nil
+ }
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithRecvBufferPool = withRecvBufferPool
+ internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -58,7 +73,7 @@ type dialOptions struct {
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
- cp Compressor
+ compressorV0 Compressor
dc Decompressor
bs internalbackoff.Strategy
block bool
@@ -72,14 +87,15 @@ type dialOptions struct {
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
- healthCheckFunc internal.HealthChecker
minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
- recvBufferPool SharedBufferPool
defaultScheme string
+ maxCallAttempts int
+ enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled.
+ useProxy bool // Specifies if a server should be connected via proxy.
}
// DialOption configures how we set up the connection.
@@ -89,6 +105,19 @@ type DialOption interface {
var globalDialOptions []DialOption
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target and allows per-target
+// configuration set through a returned DialOption. The DialOption will not
+// take effect if it specifies a resolver builder, as that DialOption is
+// factored in while parsing the target.
+type perTargetDialOption interface {
+ // DialOption returns a Dial Option to apply.
+ DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
@@ -229,7 +258,7 @@ func WithCodec(c Codec) DialOption {
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
func WithCompressor(cp Compressor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.cp = cp
+ o.compressorV0 = cp
})
}
@@ -350,7 +379,22 @@ func WithInsecure() DialOption {
// later release.
func WithNoProxy() DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UseProxy = false
+ o.useProxy = false
+ })
+}
+
+// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
+// specified in the environment. By default, the server name is provided
+// directly to the proxy as part of the CONNECT handshake. This is ignored if
+// WithNoProxy is used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithLocalDNSResolution() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.enableLocalDNSResolution = true
})
}
@@ -401,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption {
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
//
+// Note that gRPC by default performs name resolution on the target passed to
+// NewClient. To bypass name resolution and cause the target string to be
+// passed directly to the dialer here instead, use the "passthrough" resolver
+// by specifying it in the target string, e.g. "passthrough:target".
+//
// Note: All supported releases of Go (as of December 2023) override the OS
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
// with OS defaults for keepalive time and interval, use a net.Dialer that sets
@@ -408,7 +457,7 @@ func WithTimeout(d time.Duration) DialOption {
// option to true from the Control field. For a concrete example of how to do
// this, see internal.NetDialerWithTCPKeepalive().
//
-// For more information, please see [issue 23459] in the Go github repo.
+// For more information, please see [issue 23459] in the Go GitHub repo.
//
// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
@@ -417,10 +466,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
})
}
-func init() {
- internal.WithHealthCheckFunc = withHealthCheckFunc
-}
-
// WithDialer returns a DialOption that specifies a function to use for dialing
// network addresses. If FailOnNonTempDialError() is set to true, and an error
// is returned by f, gRPC checks the error's Temporary() method to decide if it
@@ -490,6 +535,8 @@ func WithUserAgent(s string) DialOption {
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@@ -632,29 +679,20 @@ func WithDisableHealthCheck() DialOption {
})
}
-// withHealthCheckFunc replaces the default health check function with the
-// provided one. It makes tests easier to change the health check function.
-//
-// For testing purpose only.
-func withHealthCheckFunc(f internal.HealthChecker) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.healthCheckFunc = f
- })
-}
-
func defaultDialOptions() dialOptions {
return dialOptions{
copts: transport.ConnectOptions{
ReadBufferSize: defaultReadBufSize,
WriteBufferSize: defaultWriteBufSize,
- UseProxy: true,
UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
- bs: internalbackoff.DefaultExponential,
- healthCheckFunc: internal.HealthCheckFunc,
- idleTimeout: 30 * time.Minute,
- recvBufferPool: nopBufferPool{},
- defaultScheme: "dns",
+ bs: internalbackoff.DefaultExponential,
+ idleTimeout: 30 * time.Minute,
+ defaultScheme: "dns",
+ maxCallAttempts: defaultMaxCallAttempts,
+ useProxy: true,
+ enableLocalDNSResolution: false,
}
}
@@ -712,25 +750,25 @@ func WithIdleTimeout(d time.Duration) DialOption {
})
}
-// WithRecvBufferPool returns a DialOption that configures the ClientConn
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
- return withRecvBufferPool(bufferPool)
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ if n < 2 {
+ n = defaultMaxCallAttempts
+ }
+ o.maxCallAttempts = n
+ })
}
-func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.recvBufferPool = bufferPool
+ o.copts.BufferPool = bufferPool
})
}
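
Note: a brief usage sketch for the dial options added above (WithMaxCallAttempts, WithLocalDNSResolution). The target string and attempt count are placeholders.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient(
		"dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Cap retries/hedges per call; values below 2 fall back to the default of 5.
		grpc.WithMaxCallAttempts(3),
		// Keep resolving hostnames locally even when a proxy is configured.
		grpc.WithLocalDNSResolution(),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}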
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 0022859ad..e7b532b6f 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 5ebf88d71..11d0ae142 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -94,7 +94,7 @@ type Codec interface {
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 000000000..074c5e234
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
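
Note: a hypothetical CodecV2 implementation, included only to illustrate the new interface and the mem.BufferSlice plumbing; grpc does not ship a JSON codec here, and the "json" name is an assumption for the example.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// codec encodes messages as JSON through the CodecV2 interface.
type codec struct{}

func (codec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// Small payloads can be wrapped directly; pooled buffers are optional.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (codec) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize flattens the buffer slice into one contiguous []byte.
	return json.Unmarshal(data.Materialize(), v)
}

func (codec) Name() string { return "json" }

func init() {
	// Clients would then opt in per call with grpc.CallContentSubtype("json").
	encoding.RegisterCodecV2(codec{})
}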
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03..ceec319dd 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"fmt"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)
@@ -32,28 +33,51 @@ import (
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codec is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
vv := messageV2Of(v)
if vv == nil {
- return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ size := proto.Size(vv)
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
}
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
vv := messageV2Of(v)
if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
- return proto.Unmarshal(data, vv)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
}
func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
return nil
}
-func (codec) Name() string {
+func (c *codecV2) Name() string {
return Name
}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 000000000..ad75313a1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/stats"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = stats.NewMetricSet()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name string
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+ Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for a int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[string]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[string]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metricName string) *MetricDescriptor {
+ return metricsRegistry[metricName]
+}
+
+func registerMetric(metricName string, def bool) {
+ if registeredMetrics[metricName] {
+ logger.Fatalf("metric %v already registered", metricName)
+ }
+ registeredMetrics[metricName] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(metricName)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[string]bool)
+ metricsRegistry = make(map[string]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
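
Note: a sketch of how a component would use this registry. The metric name, unit, and label key are invented for illustration; only the RegisterInt64Count / Record calls come from the file above.

package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// connectAttempts is registered during package initialization, as the
// registry requires.
var connectAttempts = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "example.component.connect_attempts",
	Description: "Number of connection attempts made by the example component.",
	Unit:        "attempt",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordAttempt shows the recording path: the typed handle is handed to a
// MetricsRecorder, which is normally supplied by a stats handler.
func recordAttempt(recorder estats.MetricsRecorder, target string) {
	connectAttempts.Record(recorder, 1, target)
}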
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 000000000..ee1423605
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "google.golang.org/grpc/stats"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
+// Metrics will be deleted in a future release.
+type Metrics = stats.MetricSet
+
+// Metric was replaced by direct usage of strings.
+type Metric = string
+
+// NewMetrics is an experimental legacy alias of the now-stable
+// stats.NewMetricSet. NewMetrics will be deleted in a future release.
+func NewMetrics(metrics ...Metric) *Metrics {
+ return stats.NewMetricSet(metrics...)
+}
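
Note: a hypothetical no-op MetricsRecorder, sketched as a test double for code that records on the handles above when no stats handler is configured; it is not part of the grpc module.

package example

import estats "google.golang.org/grpc/experimental/stats"

// nopRecorder satisfies MetricsRecorder and discards all measurements.
type nopRecorder struct{}

func (nopRecorder) RecordInt64Count(*estats.Int64CountHandle, int64, ...string)       {}
func (nopRecorder) RecordFloat64Count(*estats.Float64CountHandle, float64, ...string) {}
func (nopRecorder) RecordInt64Histo(*estats.Int64HistoHandle, int64, ...string)       {}
func (nopRecorder) RecordFloat64Histo(*estats.Float64HistoHandle, float64, ...string) {}
func (nopRecorder) RecordInt64Gauge(*estats.Int64GaugeHandle, int64, ...string)       {}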
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced..f1ae080dc 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.FatalDepth(depth+1, args...)
+ FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb..db320105e 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
import (
"os"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
func init() {
@@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return grpclog.Logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
- grpclog.Logger.Warning(args...)
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
- grpclog.Logger.Warningf(format, args...)
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
- grpclog.Logger.Warningln(args...)
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
- grpclog.Logger.Error(args...)
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
- grpclog.Logger.Errorf(format, args...)
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
- grpclog.Logger.Errorln(args...)
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
- grpclog.Logger.Fatal(args...)
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
- grpclog.Logger.Fatalf(format, args...)
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
- grpclog.Logger.Fatalln(args...)
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
rename to vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
index 6635f7bca..59c03bc14 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2022 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,17 +16,11 @@
*
*/
-package grpcsync
+// Package internal contains functionality internal to the grpclog package.
+package internal
-import (
- "sync"
-)
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
-// OnceFunc returns a function wrapping f which ensures f is only executed
-// once even if the returned function is executed multiple times.
-func OnceFunc(f func()) func() {
- var once sync.Once
- return func() {
- once.Do(f)
- }
-}
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 000000000..e524fdd40
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbose levels.
+ return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
new file mode 100644
index 000000000..ed90060c3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -0,0 +1,267 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+ // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+ Info(args ...any)
+ // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+ Infoln(args ...any)
+ // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+ Infof(format string, args ...any)
+ // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+ Warning(args ...any)
+ // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+ Warningln(args ...any)
+ // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+ Warningf(format string, args ...any)
+ // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ Error(args ...any)
+ // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ Errorln(args ...any)
+ // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ Errorf(format string, args ...any)
+ // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatal(args ...any)
+ // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalln(args ...any)
+ // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalf(format string, args ...any)
+ // V reports whether verbosity level l is at least the requested verbose level.
+ V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+ LoggerV2
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ InfoDepth(depth int, args ...any)
+ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ WarningDepth(depth int, args ...any)
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ ErrorDepth(depth int, args ...any)
+ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ FatalDepth(depth int, args ...any)
+}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// sprintf is fmt.Sprintf.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintf = fmt.Sprintf
+
+// sprint is fmt.Sprint.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprint = fmt.Sprint
+
+// sprintln is fmt.Sprintln.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintln = fmt.Sprintln
+
+// exit is os.Exit.
+// This var exists to make it possible to test functions calling os.Exit.
+var exit = os.Exit
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, sevStr+": "+s)
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) printf(severity int, format string, args ...any) {
+ // Note the discard check is duplicated in each print func, rather than in
+ // output, to avoid the expensive Sprint calls.
+ // De-duplicating this by moving to output would be a significant performance regression!
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprintf(format, args...))
+}
+
+func (g *loggerT) print(severity int, v ...any) {
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprint(v...))
+}
+
+func (g *loggerT) println(severity int, v ...any) {
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprintln(v...))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.print(infoLog, args...)
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.println(infoLog, args...)
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.printf(infoLog, format, args...)
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.print(warningLog, args...)
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.println(warningLog, args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.printf(warningLog, format, args...)
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.print(errorLog, args...)
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.println(errorLog, args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.printf(errorLog, format, args...)
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.print(fatalLog, args...)
+ exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.println(fatalLog, args...)
+ exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.printf(fatalLog, format, args...)
+ exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// combineLoggers returns a combined logger for both higher & lower severity logs,
+// or only one if the other is io.Discard.
+//
+// This uses io.Discard instead of io.MultiWriter when all loggers
+// are set to io.Discard. Both this package and the standard log package have
+// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
+// this writing).
+func combineLoggers(lower, higher io.Writer) io.Writer {
+ if lower == io.Discard {
+ return higher
+ }
+ if higher == io.Discard {
+ return lower
+ }
+ return io.MultiWriter(lower, higher)
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+
+ warningW = combineLoggers(infoW, warningW)
+ errorW = combineLoggers(errorW, warningW)
+
+ fatalW := errorW
+
+ m := []*log.Logger{
+ log.New(infoW, "", flag),
+ log.New(warningW, "", flag),
+ log.New(errorW, "", flag),
+ log.New(fatalW, "", flag),
+ }
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index b1674d826..4b2035857 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,70 +18,17 @@
package grpclog
-import "google.golang.org/grpc/internal/grpclog"
+import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- grpclog.Logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index ecfd36d71..892dc13d1 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,52 +19,16 @@
package grpclog
import (
- "encoding/json"
- "fmt"
"io"
- "log"
"os"
"strconv"
"strings"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
- grpclog.Logger = l
- grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
-}
-
-type loggerV2Config struct {
- verbose int
- jsonFormat bool
-}
-
-func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
- var m []*log.Logger
- flag := log.LstdFlags
- if c.jsonFormat {
- flag = 0
- }
- m = append(m, log.New(infoW, "", flag))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, "", flag))
- m = append(m, log.New(ew, "", flag))
- return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
- verbose: v,
- jsonFormat: jsonFormat,
- })
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
})
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
- g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.output(fatalLog, fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.output(fatalLog, fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.output(fatalLog, fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
@@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
+type DepthLoggerV2 internal.DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index fed1c011a..b6ae7f258 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -25,10 +25,10 @@ package backoff
import (
"context"
"errors"
+ rand "math/rand/v2"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/internal/grpcrand"
)
// Strategy defines the methodology for backing off after a grpc connection
@@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
- backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+ backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
if backoff < 0 {
return 0
}
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
index 13821a926..85540f86a 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -33,6 +33,8 @@ type lbConfig struct {
childConfig serviceconfig.LoadBalancingConfig
}
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
func ChildName(l serviceconfig.LoadBalancingConfig) string {
return l.(*lbConfig).childBuilder.Name()
}
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index 73bb4c4ee..fbc1ca356 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error
return nil, errBalancerClosed
}
bw := &balancerWrapper{
- builder: builder,
- gsb: gsb,
+ ClientConn: gsb.cc,
+ builder: builder,
+ gsb: gsb,
lastState: balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
@@ -293,6 +294,7 @@ func (gsb *Balancer) Close() {
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
+ balancer.ClientConn
balancer.Balancer
gsb *Balancer
builder balancer.Builder
@@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver
bw.gsb.mu.Unlock()
bw.gsb.cc.UpdateAddresses(sc, addrs)
}
-
-func (bw *balancerWrapper) Target() string {
- return bw.gsb.cc.Target()
-}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index aa4505a87..966932891 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
index d7e9e1d54..3ec662799 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/channel.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -43,6 +43,8 @@ type Channel struct {
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
traceRefCount int32
+ // ChannelMetrics holds connectivity state, target and call metrics for the
+ // channel within channelz.
ChannelMetrics ChannelMetrics
}
@@ -50,6 +52,8 @@ type Channel struct {
// nesting.
func (c *Channel) channelzIdentifier() {}
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
func (c *Channel) String() string {
if c.Parent == nil {
return fmt.Sprintf("Channel #%d", c.ID)
@@ -61,24 +65,31 @@ func (c *Channel) id() int64 {
return c.ID
}
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
func (c *Channel) SubChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.subChans)
}
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
func (c *Channel) NestedChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.nestedChans)
}
+// Trace returns a copy of the Channel's trace data.
func (c *Channel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()
return c.trace.copy()
}
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
type ChannelMetrics struct {
// The current connectivity state of the channel.
State atomic.Pointer[connectivity.State]
@@ -136,12 +147,16 @@ func strFromPointer(s *string) string {
return *s
}
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
func (c *ChannelMetrics) String() string {
return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
)
}
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
c := &ChannelMetrics{}
c.State.Store(&state)
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
index dfe18b089..64c791953 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -46,7 +46,7 @@ type entry interface {
// channelMap is the storage data structure for channelz.
//
-// Methods of channelMap can be divided in two two categories with respect to
+// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
if maxResults <= 0 {
maxResults = EntriesPerPage
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 03e24e150..078bb8123 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -33,7 +33,7 @@ var (
// outside this package except by tests.
IDGen IDGenerator
- db *channelMap = newChannelMap()
+ db = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
index cdfc49d6e..b5a824992 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/server.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se
return sm
}
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
sm.CallsStarted.Store(o.CallsStarted.Load())
sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
index fa64834b2..90103847c 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/socket.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct {
RemoteFlowControlWindow int64
}
+// SocketType represents the type of socket.
type SocketType string
+// SocketType can be one of these.
const (
SocketTypeNormal = "NormalSocket"
SocketTypeListen = "ListenSocket"
)
+// Socket represents a socket within channelz which includes socket
+// metrics and data related to socket activity and provides methods
+// for managing and interacting with sockets.
type Socket struct {
Entity
SocketType SocketType
@@ -100,6 +105,8 @@ type Socket struct {
Security credentials.ChannelzSecurityValue
}
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
func (ls *Socket) String() string {
return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
index 3b88e4cba..b20802e6e 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 {
return sc.ID
}
+// Sockets returns a copy of the sockets map associated with the SubChannel.
func (sc *SubChannel) Sockets() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(sc.sockets)
}
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
func (sc *SubChannel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a..0e6e18e18 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 36b867403..2bffe4777 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -79,13 +79,21 @@ type TraceEvent struct {
Parent *TraceEvent
}
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
type ChannelTrace struct {
- cm *channelMap
- clearCalled bool
+ cm *channelMap
+ clearCalled bool
+ // The time when the trace was created.
CreationTime time.Time
- EventNum int64
- mu sync.Mutex
- Events []*traceEvent
+ // A counter for the number of events recorded in the
+ // trace.
+ EventNum int64
+ mu sync.Mutex
+ // A slice of traceEvent pointers representing the events recorded for
+ // this channel.
+ Events []*traceEvent
}
func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
RefNormalSocket: "NormalSocket",
}
+// String returns a string representation of the RefChannelType
func (r RefChannelType) String() string {
return refChannelTypeToString[r]
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 9c915d9e4..1e42b6fdc 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -40,6 +40,21 @@ var (
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+ // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+ // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this
+	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+ // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+ // or "false".
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+ // XDSFallbackSupport is the env variable that controls whether support for
+ // xDS fallback is turned on. If this is unset or is false, only the first
+ // xDS server in the list of server configs will be used.
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
+ // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+ // to "true".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 29f234acb..2eb97f832 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -53,4 +53,14 @@ var (
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+ // XDSDualstackEndpointsEnabled is true if gRPC should read the
+ // "additional addresses" in the xDS endpoint resource.
+ XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true)
+
+ // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use
+ // the system's default root certificates for TLS certificate validation.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
+ XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e17..7617be215 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
package internal
var (
- // WithRecvBufferPool is implemented by the grpc package and returns a dial
+ // WithBufferPool is implemented by the grpc package and returns a dial
// option to configure a shared buffer pool for a grpc.ClientConn.
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
- // RecvBufferPool is implemented by the grpc package and returns a server
+ // BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
index 6717b757f..43423d8ad 100644
--- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
@@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool {
name = strings.TrimSpace(name)
return name == "Google" || name == "Google Compute Engine"
case "windows":
- name = strings.Replace(name, " ", "", -1)
- name = strings.Replace(name, "\n", "", -1)
- name = strings.Replace(name, "\r", "", -1)
+ name = strings.ReplaceAll(name, " ", "")
+ name = strings.ReplaceAll(name, "\n", "")
+ name = strings.ReplaceAll(name, "\r", "")
return name == "Google"
default:
return false
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
deleted file mode 100644
index bfc45102a..000000000
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *
- * Copyright 2020 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
-
-import (
- "os"
-)
-
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
-// LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
-
-// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
-// DepthLoggerV2, the below functions will be called with the appropriate stack
-// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
-//
-// # Experimental
-//
-// Notice: This type is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type DepthLoggerV2 interface {
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
index faa998de7..092ad187a 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -16,17 +16,21 @@
*
*/
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
+
+ "google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
- logger DepthLoggerV2
+ logger grpclog.DepthLoggerV2
prefix string
}
@@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
- InfoDepth(1, fmt.Sprintf(format, args...))
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
- WarningDepth(1, fmt.Sprintf(format, args...))
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
- ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...any) {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- if !Logger.V(2) {
- return
- }
- if pl != nil {
- // Handle nil, so the tests can pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- InfoDepth(1, fmt.Sprintf(format, args...))
-
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- return Logger.V(l)
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 0126d6b51..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,100 +0,0 @@
-//go:build !go1.21
-
-// TODO: when this file is deleted (after Go 1.20 support is dropped), delete
-// all of grpcrand and call the rand package directly.
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
- mu.Lock()
- defer mu.Unlock()
- return r.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- defer mu.Unlock()
- return r.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
- mu.Lock()
- defer mu.Unlock()
- r.Shuffle(n, f)
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go
deleted file mode 100644
index c37299af1..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go
+++ /dev/null
@@ -1,73 +0,0 @@
-//go:build go1.21
-
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import "math/rand"
-
-// This implementation will be used for Go version 1.21 or newer.
-// For older versions, the original implementation with mutex will be used.
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
- return rand.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- return rand.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- return rand.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
- return rand.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- return rand.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
- return rand.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
- return rand.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
- return rand.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
- rand.Shuffle(n, f)
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16a..8e8e86128 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
}
func (cs *CallbackSerializer) run(ctx context.Context) {
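The old Schedule method is split into TrySchedule (best effort) and ScheduleOr (explicit failure path). A rough usage sketch follows; grpcsync is internal to the grpc module, so this only compiles from inside it and is shown purely to illustrate the new API shape:

```go
package grpcsync_test

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func ExampleCallbackSerializer() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: silently dropped if ctx was already canceled.
	cs.TrySchedule(func(context.Context) { fmt.Println("runs in order") })

	// Explicit failure path: onFailure runs inline once the serializer
	// can no longer accept callbacks.
	cs.ScheduleOr(
		func(context.Context) { fmt.Println("also runs in order") },
		func() { fmt.Println("serializer already shut down") },
	)
}
```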
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1a..6d8c2f518 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
index ec62b4775..683d1955c 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
}
// baseContentType is the base content-type for gRPC. This is a valid
-// content-type on it's own, but can also include a content-subtype such as
+// content-type on its own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index fe49cb74c..2c13ee9da 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool {
return true
}
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() {
m.tryEnterIdleMode()
}
@@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
- // here. The first one to get the lock would got the channel to exit idle.
+ // here. The first one to get the lock would get the channel to exit idle.
// - Channel is not in idle mode, and the user calls Connect which calls
// m.ExitIdleMode.
//
@@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
+// Close stops the timer associated with the Manager, if it exists.
func (m *Manager) Close() {
atomic.StoreInt32(&m.closed, 1)
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 48d24bdb4..13e1f386b 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -29,10 +29,12 @@ import (
)
var (
- // WithHealthCheckFunc is set by dialoptions.go
- WithHealthCheckFunc any // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
+ // RegisterClientHealthCheckListener is used to provide a listener for
+ // updates from the client-side health checking service. It returns a
+ // function that can be called to stop the health producer.
+ RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
@@ -62,6 +64,9 @@ var (
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+ // MetricsRecorderForServer returns the MetricsRecorderList derived from a
+ // server's stats handlers.
+ MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
//
@@ -106,6 +111,14 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalDialOptions func()
+
+ // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+ // configured for newly created ClientConns.
+ AddGlobalPerTargetDialOptions any // func (opt any)
+ // ClearGlobalPerTargetDialOptions clears the slice of global late apply
+ // dial options.
+ ClearGlobalPerTargetDialOptions func()
+
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
@@ -126,7 +139,8 @@ var (
// deleted or changed.
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
- // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
+ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+ // provided grpc.ClientConn.
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
@@ -140,6 +154,34 @@ var (
// other features, including the CSDS service.
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
+ // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder
+ // using the provided xDS pool instead of creating a new one using the
+ // bootstrap configuration specified by the supported environment variables.
+ // The resolver.Builder is meant to be used in conjunction with the
+ // grpc.WithResolvers DialOption. The resolver.Builder does not take
+	// ownership of the provided xDS pool and it is the responsibility of the
+	// caller to close the pool when no longer required.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error)
+
+ // NewXDSResolverWithClientForTesting creates a new xDS resolver builder
+ // using the provided xDS client instead of creating a new one using the
+ // bootstrap configuration specified by the supported environment variables.
+ // The resolver.Builder is meant to be used in conjunction with the
+ // grpc.WithResolvers DialOption. The resolver.Builder does not take
+ // ownership of the provided xDS client and it is the responsibility of the
+ // caller to close the client when no longer required.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
+
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
// variable.
@@ -174,7 +216,7 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
EnterIdleModeForTesting any // func(*grpc.ClientConn)
@@ -182,31 +224,49 @@ var (
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+ // ChannelzTurnOffForTesting disables the Channelz service for testing
+ // purposes.
ChannelzTurnOffForTesting func()
- // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
- // error for a given resource type and name. This is usually triggered when
- // the associated watch timer fires. For testing purposes, having this
- // function makes events more predictable than relying on timer events.
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
+ // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+ // invoke resource-not-found error for the given resource type and name.
+ TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
- // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client
- // singleton to invoke resource not found for a resource type name and
- // resource name.
- TriggerXDSResourceNameNotFoundClient any // func(string, string) error
-
- // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of
+ // metadata.rawMD.
FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
- // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme.
- UserSetDefaultScheme bool = false
+ // UserSetDefaultScheme is set to true if the user has overridden the
+ // default resolver scheme.
+ UserSetDefaultScheme = false
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)
-// HealthChecker defines the signature of the client-side LB channel health checking function.
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
@@ -228,3 +288,15 @@ const (
// It currently has an experimental suffix which would be removed once
// end-to-end testing of the policy is completed.
const RLSLoadBalancingPolicyName = "rls_experimental"
+
+// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
+// embedding.
+type EnforceSubConnEmbedding interface {
+ enforceSubConnEmbedding()
+}
+
+// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation
+// embedding.
+type EnforceClientConnEmbedding interface {
+ enforceClientConnEmbedding()
+}
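Most of the additions to this file follow the pattern already used throughout it: a symbol is declared as `any` with its real signature recorded in a trailing comment, the implementing package assigns into it at init time, and consumers type-assert it back before calling. A self-contained sketch of that pattern, using placeholder names rather than the real gRPC types:

```go
package main

import "fmt"

// Mirrors declarations such as
//   ConnectedAddress any // func (scs SubConnState) resolver.Address
var ConnectedAddress any

type SubConnState struct{ addr string }

func init() {
	// The producing package installs the concrete implementation.
	ConnectedAddress = func(scs SubConnState) string { return scs.addr }
}

func main() {
	// The consumer recovers the typed function with an assertion that must
	// match the signature documented in the trailing comment.
	f := ConnectedAddress.(func(SubConnState) string)
	fmt.Println(f(SubConnState{addr: "10.0.0.1:443"}))
}
```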
diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
new file mode 100644
index 000000000..1f61f1a49
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proxyattributes contains functions for getting and setting proxy
+// attributes like the CONNECT address and user info.
+package proxyattributes
+
+import (
+ "net/url"
+
+ "google.golang.org/grpc/resolver"
+)
+
+type keyType string
+
+const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
+
+// Options holds the proxy connection details needed during the CONNECT
+// handshake.
+type Options struct {
+ User *url.Userinfo
+ ConnectAddr string
+}
+
+// Set returns a copy of addr with opts set in its attributes.
+func Set(addr resolver.Address, opts Options) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+ return addr
+}
+
+// Get returns the Options for the proxy [resolver.Address] and a boolean
+// value representing if the attribute is present or not. The returned data
+// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+ if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+ return a.(Options), true
+ }
+ return Options{}, false
+}
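A short sketch of how these helpers fit together; the package is internal to grpc-go, so this only compiles inside the module, and the addresses and credentials below are hypothetical:

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/internal/proxyattributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "proxy.example.com:3128"} // hypothetical proxy address

	// Attach the CONNECT target and optional credentials to the proxy address.
	addr = proxyattributes.Set(addr, proxyattributes.Options{
		User:        url.UserPassword("alice", "secret"),
		ConnectAddr: "backend.example.com:443",
	})

	// Later (e.g. in the transport) the options are recovered before the
	// CONNECT handshake.
	if opts, ok := proxyattributes.Get(addr); ok {
		fmt.Println(opts.ConnectAddr, opts.User)
	}
}
```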
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 000000000..a6c647013
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,329 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+var (
+ logger = grpclog.Component("delegating-resolver")
+ // HTTPSProxyFromEnvironment will be overwritten in the tests
+ HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+ target resolver.Target // parsed target URI to be resolved
+ cc resolver.ClientConn // gRPC ClientConn
+ targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+ proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
+ proxyURL *url.URL // proxy URL, derived from proxy environment and target
+
+ mu sync.Mutex // protects all the fields below
+ targetResolverState *resolver.State // state of the target resolver
+ proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+}
+
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on
+// the environment. It can return the following:
+// - nil URL, nil error: No proxy is configured or the address is excluded
+// using the `NO_PROXY` environment variable or if req.URL.Host is
+// "localhost" (with or without // a port number)
+// - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+// - non-nil URL, nil error: A proxy is configured, and the proxy URL was
+// retrieved successfully without any errors.
+func proxyURLForTarget(address string) (*url.URL, error) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ }}
+ return HTTPSProxyFromEnvironment(req)
+}
+
+// New creates a new delegating resolver that can create up to two child
+// resolvers:
+// - one to resolve the proxy address specified using the supported
+// environment variables. This uses the registered resolver for the "dns"
+// scheme.
+// - one to resolve the target URI using the resolver specified by the scheme
+// in the target URI or specified by the user using the WithResolvers dial
+// option. As a special case, if the target URI's scheme is "dns" and a
+// proxy is specified using the supported environment variables, the target
+// URI's path portion is used as the resolved address unless target
+// resolution is enabled using the dial option.
+func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
+ r := &delegatingResolver{
+ target: target,
+ cc: cc,
+ }
+
+ var err error
+ r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ }
+
+ // proxy is not configured or proxy address excluded using `NO_PROXY` env
+ // var, so only target resolver is used.
+ if r.proxyURL == nil {
+ return targetResolverBuilder.Build(target, cc, opts)
+ }
+
+ if logger.V(2) {
+ logger.Infof("Proxy URL detected : %s", r.proxyURL)
+ }
+
+ // When the scheme is 'dns' and target resolution on client is not enabled,
+ // resolution should be handled by the proxy, not the client. Therefore, we
+ // bypass the target resolver and store the unresolved target address.
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled {
+ state := resolver.State{
+ Addresses: []resolver.Address{{Addr: target.Endpoint()}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ }
+ r.targetResolverState = &state
+ } else {
+ wcc := &wrappingClientConn{
+ stateListener: r.updateTargetResolverState,
+ parent: r,
+ }
+ if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+ return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
+ }
+ }
+
+ if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil {
+ return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err)
+ }
+
+ if r.targetResolver == nil {
+ r.targetResolver = nopResolver{}
+ }
+ if r.proxyResolver == nil {
+ r.proxyResolver = nopResolver{}
+ }
+ return r, nil
+}
+
+// proxyURIResolver creates a resolver for resolving proxy URIs using the
+// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and
+// builds a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+ proxyBuilder := resolver.Get("dns")
+ if proxyBuilder == nil {
+ panic("delegating_resolver: resolver for proxy not found for scheme dns")
+ }
+ url := *r.proxyURL
+ url.Scheme = "dns"
+ url.Path = "/" + r.proxyURL.Host
+ url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+ proxyTarget := resolver.Target{URL: url}
+ wcc := &wrappingClientConn{
+ stateListener: r.updateProxyResolverState,
+ parent: r,
+ }
+ return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+ r.targetResolver.ResolveNow(o)
+ r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+ r.targetResolver.Close()
+ r.targetResolver = nil
+
+ r.proxyResolver.Close()
+ r.proxyResolver = nil
+}
+
+// updateClientConnStateLocked creates a list of combined addresses by
+// pairing each proxy address with every target address. For each pair, it
+// generates a new [resolver.Address] using the proxy address, adding the
+// target address as an attribute along with user info. It returns nil if
+// either resolver has not yet sent an update, and returns the error from the
+// ClientConn update once both resolvers have sent an update at least once.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+ if r.targetResolverState == nil || r.proxyAddrs == nil {
+ return nil
+ }
+
+ curState := *r.targetResolverState
+ // If multiple resolved proxy addresses are present, we send only the
+ // unresolved proxy host and let net.Dial handle the proxy host name
+ // resolution when creating the transport. Sending all resolved addresses
+ // would increase the number of addresses passed to the ClientConn and
+ // subsequently to load balancing (LB) policies like Round Robin, leading
+ // to additional TCP connections. However, if there's only one resolved
+ // proxy address, we send it directly, as it doesn't affect the address
+ // count returned by the target resolver and the address count sent to the
+ // ClientConn.
+ var proxyAddr resolver.Address
+ if len(r.proxyAddrs) == 1 {
+ proxyAddr = r.proxyAddrs[0]
+ } else {
+ proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+ }
+ var addresses []resolver.Address
+ for _, targetAddr := range (*r.targetResolverState).Addresses {
+ addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+
+ // Create a list of combined endpoints by pairing all proxy endpoints
+ // with every target endpoint. Each time, it constructs a new
+	// [resolver.Endpoint] using all the addresses from all the proxy endpoints
+	// and the target addresses from one target endpoint. The target address and
+	// user information from the proxy URL are added as attributes to the proxy
+	// address. The resulting list of addresses is then grouped into endpoints,
+ // covering all combinations of proxy and target endpoints.
+ var endpoints []resolver.Endpoint
+ for _, endpt := range (*r.targetResolverState).Endpoints {
+ var addrs []resolver.Address
+ for _, proxyAddr := range r.proxyAddrs {
+ for _, targetAddr := range endpt.Addresses {
+ addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+ }
+ endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
+ }
+ // Use the targetResolverState for its service config and attributes
+ // contents. The state update is only sent after both the target and proxy
+ // resolvers have sent their updates, and curState has been updated with
+ // the combined addresses.
+ curState.Addresses = addresses
+ curState.Endpoints = endpoints
+ return r.cc.UpdateState(curState)
+}
+
+// updateProxyResolverState updates the proxy resolver state by storing proxy
+// addresses and endpoints, marking the resolver as ready, and triggering a
+// state update if both proxy and target resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
+// is a StateListener function of wrappingClientConn passed to the proxy resolver.
+func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if logger.V(2) {
+ logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
+ }
+ if len(state.Endpoints) > 0 {
+ // We expect exactly one address per endpoint because the proxy
+ // resolver uses "dns" resolution.
+ r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
+ for _, endpoint := range state.Endpoints {
+ r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
+ }
+ } else if state.Addresses != nil {
+ r.proxyAddrs = state.Addresses
+ } else {
+ r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
+ }
+ err := r.updateClientConnStateLocked()
+ // Another possible approach was to block until updates are received from
+ // both resolvers. But this is not used because calling `New()` triggers
+ // `Build()` for the first resolver, which calls `UpdateState()`. And the
+ // second resolver hasn't sent an update yet, so it would cause `New()` to
+ // block indefinitely.
+ if err != nil {
+ r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ return err
+}
+
+// updateTargetResolverState updates the target resolver state by storing target
+// addresses, endpoints, and service config, marking the resolver as ready, and
+// triggering a state update if both resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It
+// is a StateListener function of wrappingClientConn passed to the target resolver.
+func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if logger.V(2) {
+ logger.Infof("Addresses received from target resolver: %v", state.Addresses)
+ }
+ r.targetResolverState = &state
+ err := r.updateClientConnStateLocked()
+ if err != nil {
+ r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ return nil
+}
+
+// wrappingClientConn serves as an intermediary between the parent ClientConn
+// and the child resolvers created here. It implements the resolver.ClientConn
+// interface and is passed in that capacity to the child resolvers.
+type wrappingClientConn struct {
+ // Callback to deliver resolver state updates
+ stateListener func(state resolver.State) error
+ parent *delegatingResolver
+}
+
+// UpdateState receives resolver state updates and forwards them to the
+// appropriate listener function (either for the proxy or target resolver).
+func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
+ return wcc.stateListener(state)
+}
+
+// ReportError intercepts errors from the child resolvers and passes them to ClientConn.
+func (wcc *wrappingClientConn) ReportError(err error) {
+ wcc.parent.cc.ReportError(err)
+}
+
+// NewAddress intercepts the new resolved addresses from the child resolvers
+// and passes them to the ClientConn.
+func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
+ wcc.UpdateState(resolver.State{Addresses: addrs})
+}
+
+// ParseServiceConfig parses the provided service config and returns an
+// object that provides the parsed config.
+func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
+ return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
+}
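To make the address combination above concrete, here is a hedged sketch (with made-up addresses) of what updateClientConnStateLocked produces when a single proxy address and two target addresses have been resolved:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/proxyattributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	proxyAddr := resolver.Address{Addr: "192.0.2.10:3128"} // single resolved proxy address
	targets := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}

	var combined []resolver.Address
	for _, t := range targets {
		// Each entry is the proxy address carrying the real target as the
		// CONNECT address attribute (plus any user info from the proxy URL).
		combined = append(combined, proxyattributes.Set(proxyAddr, proxyattributes.Options{
			ConnectAddr: t.Addr,
		}))
	}
	// combined holds one proxy-address entry per target; this, not the raw
	// target list, is what the resolver pushes to the ClientConn.
	fmt.Println(len(combined))
}
```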
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index f3f52a59a..ba5c5a95d 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -24,7 +24,9 @@ import (
"context"
"encoding/json"
"fmt"
+ rand "math/rand/v2"
"net"
+ "net/netip"
"os"
"strconv"
"strings"
@@ -35,7 +37,6 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
@@ -63,6 +64,8 @@ var (
func init() {
resolver.Register(NewBuilder())
internal.TimeAfterFunc = time.After
+ internal.TimeNowFunc = time.Now
+ internal.TimeUntilFunc = time.Until
internal.NewNetResolver = newNetResolver
internal.AddressDialer = addressDialer
}
@@ -120,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
}
// IP address.
- if ipAddr, ok := formatIP(host); ok {
+ if ipAddr, err := formatIP(host); err == nil {
addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
cc.UpdateState(resolver.State{Addresses: addr})
return deadResolver{}, nil
@@ -175,7 +178,7 @@ type dnsResolver struct {
// finished. Otherwise, data race will be possible. [Race Example] in
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() doesn't wait for watcher() goroutine
- // finishes, race detector sometimes will warns lookup (READ the lookup
+ // finishes, race detector sometimes will warn lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
@@ -209,12 +212,12 @@ func (d *dnsResolver) watcher() {
err = d.cc.UpdateState(*state)
}
- var waitTime time.Duration
+ var nextResolutionTime time.Time
if err == nil {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- waitTime = MinResolutionInterval
+ nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
select {
case <-d.ctx.Done():
return
@@ -223,19 +226,21 @@ func (d *dnsResolver) watcher() {
} else {
// Poll on an error found in DNS Resolver or an error received from
// ClientConn.
- waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
+ nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
backoffIndex++
}
select {
case <-d.ctx.Done():
return
- case <-internal.TimeAfterFunc(waitTime):
+ case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
}
}
}
func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
- if !EnableSRVLookups {
+ // Skip this particular host to avoid timeouts with some versions of
+ // systemd-resolved.
+ if !EnableSRVLookups || d.host == "metadata.google.internal." {
return nil, nil
}
var newAddrs []resolver.Address
@@ -256,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error)
return nil, err
}
for _, a := range lbAddrs {
- ip, ok := formatIP(a)
- if !ok {
- return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+ ip, err := formatIP(a)
+ if err != nil {
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
}
addr := ip + ":" + strconv.Itoa(int(s.Port))
newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
@@ -318,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error
}
newAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
- ip, ok := formatIP(a)
- if !ok {
- return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+ ip, err := formatIP(a)
+ if err != nil {
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
}
addr := ip + ":" + d.port
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
@@ -347,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
return &state, nil
}
-// formatIP returns ok = false if addr is not a valid textual representation of
-// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// formatIP returns an error if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and error = nil.
// If addr is an IPv6 address, return the addr enclosed in square brackets and
-// ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
+// error = nil.
+func formatIP(addr string) (string, error) {
+ ip, err := netip.ParseAddr(addr)
+ if err != nil {
+ return "", err
}
- if ip.To4() != nil {
- return addr, true
+ if ip.Is4() {
+ return addr, nil
}
- return "[" + addr + "]", true
+ return "[" + addr + "]", nil
}
// parseTarget takes the user input target string and default port, returns
@@ -375,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
return "", "", internal.ErrMissingAddr
}
- if ip := net.ParseIP(target); ip != nil {
+ if _, err := netip.ParseAddr(target); err == nil {
// target is an IPv4 or IPv6(without brackets) address
return target, defaultPort, nil
}
@@ -423,7 +428,7 @@ func chosenByPercentage(a *int) bool {
if a == nil {
return true
}
- return grpcrand.Intn(100)+1 <= *a
+ return rand.IntN(100)+1 <= *a
}
func canaryingSC(js string) string {
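The resolver now parses with net/netip instead of net.ParseIP, so a parse failure carries an error that gets wrapped into the "dns: error parsing A record IP address" messages above. A small standalone illustration of the parsing and IPv6 bracketing behaviour:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	for _, s := range []string{"10.1.2.3", "2001:db8::1", "bad-host"} {
		ip, err := netip.ParseAddr(s)
		if err != nil {
			// This is the error the resolver now wraps and reports.
			fmt.Println(s, "->", err)
			continue
		}
		host := s
		if !ip.Is4() {
			host = "[" + s + "]" // bracket IPv6 so joining with ":port" stays valid
		}
		fmt.Println(s, "->", host+":443")
	}
}
```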
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
index a7ecaf8d5..c0eae4f5f 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -51,11 +51,22 @@ var (
// The following vars are overridden from tests.
var (
// TimeAfterFunc is used by the DNS resolver to wait for the given duration
- // to elapse. In non-test code, this is implemented by time.After. In test
+ // to elapse. In non-test code, this is implemented by time.After. In test
// code, this can be used to control the amount of time the resolver is
// blocked waiting for the duration to elapse.
TimeAfterFunc func(time.Duration) <-chan time.Time
+ // TimeNowFunc is used by the DNS resolver to get the current time.
+ // In non-test code, this is implemented by time.Now. In test code,
+ // this can be used to control the current time for the resolver.
+ TimeNowFunc func() time.Time
+
+ // TimeUntilFunc is used by the DNS resolver to calculate the remaining
+ // wait time for re-resolution. In non-test code, this is implemented by
+ // time.Until. In test code, this can be used to control the remaining
+ // time for resolver to wait for re-resolution.
+ TimeUntilFunc func(time.Time) time.Duration
+
// NewNetResolver returns the net.Resolver instance for the given target.
NewNetResolver func(string) (NetResolver, error)
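These hooks exist so resolver tests can run against a fake clock. Since the package is internal to grpc-go, the helper below is a hypothetical sketch of the intent rather than something an application could import:

```go
package dns_test

import (
	"testing"
	"time"

	"google.golang.org/grpc/internal/resolver/dns/internal"
)

// stubDNSClock is a hypothetical test helper showing how the time hooks can
// be swapped for a fake clock and restored afterwards.
func stubDNSClock(t *testing.T) {
	origNow, origUntil, origAfter := internal.TimeNowFunc, internal.TimeUntilFunc, internal.TimeAfterFunc
	t.Cleanup(func() {
		internal.TimeNowFunc, internal.TimeUntilFunc, internal.TimeAfterFunc = origNow, origUntil, origAfter
	})

	fakeNow := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	internal.TimeNowFunc = func() time.Time { return fakeNow }
	internal.TimeUntilFunc = func(tm time.Time) time.Duration { return tm.Sub(fakeNow) }
	internal.TimeAfterFunc = func(time.Duration) <-chan time.Time {
		ch := make(chan time.Time, 1)
		ch <- fakeNow // fire immediately so the watcher re-resolves without real waiting
		return ch
	}
}
```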
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572..b901c7bac 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 000000000..fd33af51a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there are none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // could also append
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
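A hedged sketch of the intended flow: one component stores telemetry labels on the context and a stats handler reads them back later. The package is internal to grpc-go and the label key below is only an example:

```go
package main

import (
	"context"
	"fmt"

	istats "google.golang.org/grpc/internal/stats"
)

func main() {
	// Producer side: attach telemetry labels to the RPC context.
	ctx := istats.SetLabels(context.Background(), &istats.Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-central1-a"},
	})

	// Consumer side (e.g. a stats handler): read them back, tolerating absence.
	if labels := istats.GetLabels(ctx); labels != nil {
		fmt.Println(labels.TelemetryLabels["grpc.lb.locality"])
	}
}
```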
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000..79044657b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if the number of label values provided to a Record call does not
+// match the number of label keys (required plus optional) of the metric.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If no stats handlers provided implement the MetricsRecorder interface,
+// the MetricsRecorder list returned is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
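The constructor above is essentially interface filtering: only stats handlers that also implement estats.MetricsRecorder are kept, and every Record* call fans out to them. A self-contained sketch of that pattern with placeholder interfaces (not the real gRPC types):

```go
package main

import "fmt"

type Handler interface{ HandleRPC() }
type MetricsRecorder interface{ RecordInt64Count(name string, incr int64) }

type plainHandler struct{}

func (plainHandler) HandleRPC() {}

type metricsHandler struct{}

func (metricsHandler) HandleRPC() {}
func (metricsHandler) RecordInt64Count(name string, incr int64) {
	fmt.Println(name, incr)
}

func main() {
	handlers := []Handler{plainHandler{}, metricsHandler{}}

	// Keep only the handlers that can record metrics, as NewMetricsRecorderList does.
	var recorders []MetricsRecorder
	for _, h := range handlers {
		if mr, ok := h.(MetricsRecorder); ok {
			recorders = append(recorders, mr)
		}
	}

	// Fan a single measurement out to every recorder, as the Record* methods do.
	for _, r := range recorders {
		r.RecordInt64Count("example.metric.count", 1)
	}
}
```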
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc8205..1186f1e9a 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
- any, err := anypb.New(protoadapt.MessageV2Of(detail))
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
func (s *Status) Details() []any {
if s == nil || s.s == nil {
return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
details = append(details, err)
continue
}
- details = append(details, detail)
+ // The call to MessageV1Of is required to unwrap the proto message if
+ // it implemented only the MessageV1 API. The proto message would have
+ // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+ // added to a global registry used by any.UnmarshalNew().
+ // MessageV1Of has the following behaviour:
+ // 1. If the given message is a wrapped MessageV1, it returns the
+ // unwrapped value.
+ // 2. If the given message already implements MessageV1, it returns it
+ // as is.
+ // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+ //
+ // Since the Status.WithDetails() API only accepts MessageV1, calling
+ // MessageV1Of ensures we return the same type that was given to
+ // WithDetails:
+	// * If the given type implemented only MessageV1, the unwrapping from
+ // point 1 above will restore the type.
+ // * If the given type implemented both MessageV1 and MessageV2, point 2
+ // above will ensure no wrapping is performed.
+ // * If the given type implemented only MessageV2 and was wrapped using
+ // MessageV1Of before passing to WithDetails(), it would be unwrapped
+ // in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+ // that the type is wrapped in a MessageV1 wrapper again before
+ // returning. Note that protoc-gen-go doesn't generate code which
+ // implements ONLY MessageV2 at the time of writing.
+ //
+ // NOTE: Status details can also be added using the FromProto method.
+ // This could theoretically allow passing a Detail message that only
+ // implements the V2 API. In such a case the message will be wrapped in
+ // a MessageV1 wrapper when fetched using Details().
+ // Since protoc-gen-go generates only code that implements both V1 and
+ // V2 APIs for backward compatibility, this is not a concern.
+ details = append(details, protoadapt.MessageV1Of(detail))
}
return details
}
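Because Details() now re-wraps decoded messages with protoadapt.MessageV1Of, callers get back the same concrete type they passed to WithDetails(). A round trip through the public status package (which delegates to this internal one) looks roughly like this:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.NotFound, "order not found")
	st, err := st.WithDetails(&errdetails.ResourceInfo{ResourceName: "orders/42"})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range st.Details() {
		// The type assertion succeeds because Details() returns the same
		// concrete type that was attached above.
		if ri, ok := d.(*errdetails.ResourceInfo); ok {
			fmt.Println(ri.GetResourceName())
		}
	}
}
```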
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd7..54c24c2ff 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
index 078137b7f..7e7aaa546 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
index fd7d43a89..d5c1085ee 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
new file mode 100644
index 000000000..8ed347c54
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "sync/atomic"
+
+ "golang.org/x/net/http2"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+ *Stream // Embed for common stream functionality.
+
+ ct *http2Client
+ done chan struct{} // closed at the end of stream to unblock writers.
+ doneFunc func() // invoked at the end of stream.
+
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ // headerValid indicates whether a valid header was received. Only
+ // meaningful after headerChan is closed (always call waitOnHeader() before
+ // reading its value).
+ headerValid bool
+ header metadata.MD // the received header metadata
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+ unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+ status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+ b, err := s.Stream.read(n)
+ if err == nil {
+ s.ct.incrMsgRecv()
+ }
+ return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+ var (
+ rst bool
+ rstCode http2.ErrCode
+ )
+ if err != nil {
+ rst = true
+ rstCode = http2.ErrCodeCancel
+ }
+ s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+ return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+ return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+ return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+ select {
+ case <-s.ctx.Done():
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.Close(ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
+ case <-s.headerChan:
+ }
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if no compression was applied.
+func (s *ClientStream) RecvCompress() string {
+ s.waitOnHeader()
+ return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+ return s.done
+}
+
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+ s.waitOnHeader()
+
+ if !s.headerValid || s.noHeaders {
+ return nil, s.status.Err()
+ }
+
+ return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true.
+func (s *ClientStream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+ return s.status
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 3deadfb4a..ef72fbb3a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
- d []byte
+ reader mem.Reader
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case, this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
- var wakeUp bool
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
if !f() { // f wasn't successful
- c.mu.Unlock()
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- c.trfChan.Store(make(chan struct{}))
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(chan struct{})
- close(ch)
- c.trfChan.Store((chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return false, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ _ = v.reader.Close()
}
}
+
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
- ch, _ := c.trfChan.Load().(chan struct{})
+ ch := c.trfChan.Swap(nil)
if ch != nil {
- close(ch)
+ close(*ch)
}
- c.trfChan.Store((chan struct{})(nil))
- c.mu.Unlock()
}
type side int
@@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -490,12 +511,13 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
conn: conn,
logger: logger,
ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ _ = df.reader.Close()
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possible HTTP2 frame size.
+ // Every dataFrame has two buffers; h that keeps grpc-message header and data
+ // that is the actual message. As an optimization to keep wire traffic low, data
+ // from data is copied to h to make as big as the maximum possible HTTP2 frame
+ // size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
- var (
- buf []byte
- )
+
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, len(dataItem.d))
- if hSize != 0 {
- if dSize == 0 {
- buf = dataItem.h
- } else {
- // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage
- var localBuf [http2MaxFrameLen]byte
- copy(localBuf[:hSize], dataItem.h)
- copy(localBuf[hSize:], dataItem.d[:dSize])
- buf = localBuf[:hSize+dSize]
- }
+ dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+ remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
} else {
- buf = dataItem.d
- }
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
- size := hSize + dSize
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = dataItem.reader.Read((*buf)[hSize:])
+ }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
- dataItem.d = dataItem.d[dSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
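The controlBuffer rework above replaces the err field with an explicit closed flag, stores the throttling channel in an atomic.Pointer[chan struct{}] instead of an untyped atomic.Value, and makes finish() and cleanupStreamHandler close any queued dataFrame readers. A minimal, hypothetical sketch of the throttling pattern in isolation (the type, field names, and threshold below are illustrative, not the vendored API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// throttledQueue is a stripped-down illustration of the controlBuffer
// pattern: once the number of queued "response" items reaches a threshold,
// a channel is installed atomically and producers calling throttle() block
// on it until the queue drains below the threshold again.
type throttledQueue struct {
	mu        sync.Mutex
	items     []string
	threshold int
	trfChan   atomic.Pointer[chan struct{}] // non-nil while throttling is active
}

// put appends an item and, when the threshold is reached, installs the
// throttling channel so that later callers of throttle() block.
func (q *throttledQueue) put(item string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
	if len(q.items) == q.threshold {
		ch := make(chan struct{})
		q.trfChan.Store(&ch)
	}
}

// get removes one item and lifts the throttle when the queue drops below the
// threshold, mirroring how getOnceLocked swaps and closes trfChan.
func (q *throttledQueue) get() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	if len(q.items) == q.threshold {
		if ch := q.trfChan.Swap(nil); ch != nil {
			close(*ch)
		}
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

// throttle blocks while the queue is at or over the threshold.
func (q *throttledQueue) throttle() {
	if ch := q.trfChan.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	q := &throttledQueue{threshold: 2}
	q.put("settings-ack")
	q.put("cleanup-stream") // threshold reached; throttle() now blocks
	go func() { q.get() }() // draining below the threshold unblocks throttle()
	q.throttle()
	fmt.Println("unthrottled")
}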
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 97198c515..dfc0f224e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
func (f *trInFlow) onData(n uint32) uint32 {
f.unacked += n
- if f.unacked >= f.limit/4 {
- w := f.unacked
- f.unacked = 0
+ if f.unacked < f.limit/4 {
f.updateEffectiveWindowSize()
- return w
+ return 0
}
- f.updateEffectiveWindowSize()
- return 0
+ return f.reset()
}
func (f *trInFlow) reset() uint32 {
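The trInFlow.onData change above is behavior preserving: a window update is reported only once the unacknowledged byte count reaches a quarter of the connection window, and 0 is returned otherwise so updates stay batched. A rough, standalone illustration of that accounting with hypothetical names:

package main

import "fmt"

// windowAccount is a hypothetical stand-in for trInFlow, showing the
// quarter-of-the-window batching of WINDOW_UPDATE increments.
type windowAccount struct {
	limit   uint32 // connection-level receive window
	unacked uint32 // bytes received but not yet acknowledged to the peer
}

// onData records n received bytes and returns the increment to put in a
// WINDOW_UPDATE frame, or 0 while the update is still being batched.
func (w *windowAccount) onData(n uint32) uint32 {
	w.unacked += n
	if w.unacked < w.limit/4 {
		return 0 // below a quarter of the window: keep batching
	}
	update := w.unacked
	w.unacked = 0
	return update
}

func main() {
	w := &windowAccount{limit: 65535}
	fmt.Println(w.onData(8192))  // 0: still below limit/4 (16383)
	fmt.Println(w.onData(16384)) // 24576: threshold crossed, flush the update
}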
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 4a3ddce29..3dea23573 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@@ -171,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@@ -222,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
}
}
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
ht.writeStatusMu.Lock()
defer ht.writeStatusMu.Unlock()
@@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
@@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
- s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -286,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
// writePendingHeaders sets common and custom headers on the first
// write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
ht.writeCommonHeaders(s)
ht.writeCustomHeaders(s)
}
// writeCommonHeaders sets common headers on the first write
// call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
h := ht.rw.Header()
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
h.Set("Content-Type", ht.contentType)
@@ -314,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
// writeCustomHeaders sets custom headers set on the stream via SetHeader
// on the first write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
h := ht.rw.Header()
s.hdrMu.Lock()
@@ -330,19 +333,31 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers to Write
+ // expect.
+ data.Ref()
headersWritten := s.updateHeaderSent()
- return ht.do(func() {
+ err := ht.do(func() {
+ defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
- ht.rw.Write(data)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
ht.rw.(http.Flusher).Flush()
})
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
}
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
if err := s.SetHeader(md); err != nil {
return err
}
@@ -370,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
return err
}
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var cancel context.CancelFunc
if ht.timeoutSet {
@@ -393,20 +408,22 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
- s := &Stream{
- id: 0, // irrelevant
- ctx: ctx,
- requestRead: func(int) {},
+ s := &ServerStream{
+ Stream: &Stream{
+ id: 0, // irrelevant
+ ctx: ctx,
+ requestRead: func(int) {},
+ buf: newRecvBuffer(),
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ },
cancel: cancel,
- buf: newRecvBuffer(),
st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -415,21 +432,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
@@ -458,11 +473,9 @@ func (ht *serverHandlerTransport) runStream() {
}
}
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
+func (ht *serverHandlerTransport) incrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain(debugData string) {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
@@ -485,5 +498,5 @@ func mapRecvMsgError(err error) error {
if strings.Contains(err.Error(), "body closed by handler") {
return status.Error(codes.Canceled, err.Error())
}
- return connectionErrorf(true, err, err.Error())
+ return connectionErrorf(true, err, "%s", err.Error())
}
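The handler_server.go changes above replace the fixed 8 KiB read loop with buffers borrowed from the transport's mem.BufferPool: a buffer is returned to the pool when the read produced no bytes, and otherwise handed off to the receive buffer, which frees it back later. A simplified sketch of that ownership pattern using a plain sync.Pool rather than the gRPC mem package (names and sizes are illustrative):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

const frameLen = 16384 // stand-in for http2MaxFrameLen

var bufPool = sync.Pool{
	New: func() any { b := make([]byte, frameLen); return &b },
}

// readBody pulls a request body into pool-backed chunks. A chunk is returned
// to the pool when the read produced no bytes; otherwise ownership moves to
// the consumer via deliver, which is then responsible for returning it, the
// same split as the Get/Put calls in the diff.
func readBody(body io.Reader, deliver func([]byte)) error {
	for {
		buf := bufPool.Get().(*[]byte)
		n, err := body.Read(*buf)
		if n > 0 {
			deliver((*buf)[:n])
		} else {
			bufPool.Put(buf) // nothing read; give the buffer back
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	var total int
	err := readBody(strings.NewReader("hello gRPC"), func(chunk []byte) {
		total += len(chunk)
	})
	fmt.Println(total, err) // 10 <nil>
}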
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 3c63c7069..513dbb93d 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -43,10 +43,12 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
+ "google.golang.org/grpc/internal/proxyattributes"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -59,6 +61,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
@@ -83,9 +87,9 @@ type http2Client struct {
writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
- goAway chan struct{}
-
- framer *framer
+ goAway chan struct{}
+ keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+ framer *framer
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
// Do not access controlBuf with mu held.
@@ -120,7 +124,7 @@ type http2Client struct {
mu sync.Mutex // guard the following variables
nextID uint32
state transportState
- activeStreams map[uint32]*Stream
+ activeStreams map[uint32]*ClientStream
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
@@ -144,13 +148,13 @@ type http2Client struct {
onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
}
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) {
address := addr.Addr
networkType, ok := networktype.Get(addr)
if fn != nil {
@@ -174,8 +178,8 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
if !ok {
networkType, address = parseDialTarget(address)
}
- if networkType == "tcp" && useProxy {
- return proxyDial(ctx, address, grpcUA)
+ if opts, present := proxyattributes.Get(addr); present {
+ return proxyDial(ctx, addr, grpcUA, opts)
}
return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
}
@@ -196,10 +200,10 @@ func isTemporary(err error) bool {
return true
}
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
+func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -214,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
// address specific arbitrary data to reach custom dialers and credential handshakers.
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
- conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
+ conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
@@ -229,7 +233,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
- // The following defer and goroutine monitor the connectCtx for cancelation
+ // The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@@ -332,10 +336,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
+ keepaliveDone: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
- activeStreams: make(map[uint32]*Stream),
+ activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
@@ -346,7 +351,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
onClose: onClose,
}
var czSecurity credentials.ChannelzSecurityValue
@@ -463,7 +468,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@@ -476,17 +481,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return t, nil
}
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
- s := &Stream{
- ct: t,
- done: make(chan struct{}),
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- headerChan: make(chan struct{}),
- contentSubtype: callHdr.ContentSubtype,
- doneFunc: callHdr.DoneFunc,
+ s := &ClientStream{
+ Stream: &Stream{
+ method: callHdr.Method,
+ sendCompress: callHdr.SendCompress,
+ buf: newRecvBuffer(),
+ contentSubtype: callHdr.ContentSubtype,
+ },
+ ct: t,
+ done: make(chan struct{}),
+ headerChan: make(chan struct{}),
+ doneFunc: callHdr.DoneFunc,
}
s.wq = newWriteQuota(defaultWriteQuota, s.done)
s.requestRead = func(n int) {
@@ -502,9 +509,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
ctxDone: s.ctx.Done(),
recv: s.buf,
closeStream: func(err error) {
- t.CloseStream(s, err)
+ s.Close(err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -525,8 +531,9 @@ func (t *http2Client) getPeer() *peer.Peer {
// to be the last frame loopy writes to the transport.
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
t.mu.Lock()
- defer t.mu.Unlock()
- if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
+ maxStreamID := t.nextID - 2
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
return false, g.closeConn
@@ -593,12 +600,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
for k, v := range callAuthData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
- if b := stats.OutgoingTags(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
- }
- if b := stats.OutgoingTrace(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
- }
if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
@@ -734,7 +735,7 @@ func (e NewStreamError) Error() string {
// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
// ServerName field of the resolver returned address takes precedence over
@@ -759,7 +760,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return
}
// The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
+ s.unprocessed.Store(true)
s.write(recvMsg{err: err})
close(s.done)
// If headerChan isn't closed, then close it.
@@ -770,7 +771,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@@ -904,21 +905,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return s, nil
}
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
- var (
- rst bool
- rstCode http2.ErrCode
- )
- if err != nil {
- rst = true
- rstCode = http2.ErrCodeCancel
- }
- t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
// Set stream status to done.
if s.swapState(streamDone) == streamDone {
// If it was already done, return. If multiple closeStream calls
@@ -983,6 +970,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1005,18 +993,33 @@ func (t *http2Client) Close(err error) {
// should unblock it so that the goroutine eventually exits.
t.kpDormancyCond.Signal()
}
+ // Append info about previous goaways if there were any, since this may be important
+ // for understanding the root cause for this connection to be closed.
+ goAwayDebugMessage := t.goAwayDebugMessage
t.mu.Unlock()
+
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
- // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits for loopyWriter to be closed with a timer to avoid the
+ // long blocking in case the connection is blackholed, i.e. TCP is
+ // just stuck.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
- <-t.writerDone
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+ }
t.cancel()
t.conn.Close()
+ // Waits for the reader and keepalive goroutines to exit before returning to
+ // ensure all resources are cleaned up before Close can return.
+ <-t.readerDone
+ if t.keepaliveEnabled {
+ <-t.keepaliveDone
+ }
channelz.RemoveEntry(t.channelz.ID)
- // Append info about previous goaways if there were any, since this may be important
- // for understanding the root cause for this connection to be closed.
- _, goAwayDebugMessage := t.GetGoAwayReason()
-
var st *status.Status
if len(goAwayDebugMessage) > 0 {
st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
@@ -1065,30 +1068,40 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+ reader := data.Reader()
+
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
+ _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
+ _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- d: data,
+ reader: reader,
}
- if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return err
}
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ t.incrMsgSent()
+ return nil
}
-func (t *http2Client) getStream(f http2.Frame) *Stream {
+func (t *http2Client) getStream(f http2.Frame) *ClientStream {
t.mu.Lock()
s := t.activeStreams[f.Header().StreamID]
t.mu.Unlock()
@@ -1098,7 +1111,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream {
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
-func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
if w := s.fc.maybeAdjust(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -1107,7 +1120,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) {
// updateWindow adjusts the inbound quota for the stream.
// Window updates will be sent out when the cumulative quota
// exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
+func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -1190,10 +1203,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1210,7 +1226,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
}
if f.ErrCode == http2.ErrCodeRefusedStream {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
+ s.unprocessed.Store(true)
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
@@ -1222,7 +1238,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@@ -1291,11 +1307,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
t.controlBuf.put(pingAck)
}
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return
+ return nil
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
@@ -1307,8 +1323,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
- return
+ return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
}
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
@@ -1325,8 +1340,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
- return
+ return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
}
default:
t.setGoAwayReason(f)
@@ -1350,15 +1364,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.prevGoAwayID = id
if len(t.activeStreams) == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
- return
+ return connectionErrorf(true, nil, "received goaway and there are no active streams")
}
- streamsToClose := make([]*Stream, 0)
+ streamsToClose := make([]*ClientStream, 0)
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&stream.unprocessed, 1)
+ stream.unprocessed.Store(true)
streamsToClose = append(streamsToClose, stream)
}
}
@@ -1368,6 +1381,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for _, stream := range streamsToClose {
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
+ return nil
}
// setGoAwayReason sets the value of t.goAwayReason based
@@ -1409,7 +1423,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
endStream := frame.StreamEnded()
- atomic.StoreUint32(&s.bytesReceived, 1)
+ s.bytesReceived.Store(true)
initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
if !initialHeader && !endStream {
@@ -1603,7 +1617,13 @@ func (t *http2Client) readServerPreface() error {
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
- defer close(t.readerDone)
+ var errClose error
+ defer func() {
+ close(t.readerDone)
+ if errClose != nil {
+ t.Close(errClose)
+ }
+ }()
if err := t.readServerPreface(); err != nil {
errCh <- err
@@ -1642,11 +1662,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
- return
}
+ // Transport error.
+ errClose = connectionErrorf(true, err, "error reading from server: %v", err)
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1660,7 +1679,7 @@ func (t *http2Client) reader(errCh chan<- error) {
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
- t.handleGoAway(frame)
+ errClose = t.handleGoAway(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
@@ -1671,15 +1690,15 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
+ var err error
+ defer func() {
+ close(t.keepaliveDone)
+ if err != nil {
+ t.Close(err)
+ }
+ }()
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
@@ -1703,7 +1722,7 @@ func (t *http2Client) keepalive() {
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+ err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
return
}
t.mu.Lock()
@@ -1745,7 +1764,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():
@@ -1774,14 +1793,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
-func (t *http2Client) IncrMsgSent() {
- t.channelz.SocketMetrics.MessagesSent.Add(1)
- t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+func (t *http2Client) incrMsgSent() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+ }
}
-func (t *http2Client) IncrMsgRecv() {
- t.channelz.SocketMetrics.MessagesReceived.Add(1)
- t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+func (t *http2Client) incrMsgRecv() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+ }
}
func (t *http2Client) getOutFlowWindow() int64 {
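The http2_client.go changes above keep Close from blocking forever on a blackholed connection: the wait for the GOAWAY to be written is bounded by goAwayLoopyWriterTimeout, and the reader and keepalive goroutines now close their own done channels and defer the call to Close instead of invoking it inline. A small sketch of the bounded wait (the channel and timeout names are placeholders):

package main

import (
	"fmt"
	"time"
)

// waitWithTimeout waits for done to be closed, giving up after timeout. It
// has the same timer-plus-select shape used around writerDone in Close.
func waitWithTimeout(done <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop() // release the timer if done wins the race
	select {
	case <-done:
		return true // the writer finished flushing the GOAWAY frame
	case <-timer.C:
		return false // connection looks blackholed; hard-close anyway
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(writerDone)
	}()
	fmt.Println(waitWithTimeout(writerDone, time.Second))                  // true
	fmt.Println(waitWithTimeout(make(chan struct{}), 50*time.Millisecond)) // false
}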
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index cab0e2d3d..997b0a59b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io"
"math"
+ rand "math/rand/v2"
"net"
"net/http"
"strconv"
@@ -38,12 +39,12 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -110,7 +111,7 @@ type http2Server struct {
// already initialized since draining is already underway.
drainEvent *grpcsync.Event
state transportState
- activeStreams map[uint32]*Stream
+ activeStreams map[uint32]*ServerStream
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
@@ -119,7 +120,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelz *channelz.Socket
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
@@ -255,13 +256,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
- activeStreams: make(map[uint32]*Stream),
+ activeStreams: make(map[uint32]*ServerStream),
stats: config.StatsHandlers,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- bufferPool: newBufferPool(),
+ bufferPool: config.BufferPool,
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
@@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@@ -358,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -384,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.maxStreamID = streamID
buf := newRecvBuffer()
- s := &Stream{
- id: streamID,
+ s := &ServerStream{
+ Stream: &Stream{
+ id: streamID,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ },
st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
headerWireLength: int(frame.Header().Length),
}
var (
@@ -536,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
// Attach the received metadata to the context.
if len(mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
- if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
- s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
- }
- if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
- s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
- }
}
t.mu.Lock()
if t.state != reachable {
@@ -567,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
- httpStatus: 405,
+ httpStatus: http.StatusMethodNotAllowed,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
@@ -588,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
stat = status.New(codes.PermissionDenied, err.Error())
}
t.controlBuf.put(&earlyAbortStream{
- httpStatus: 200,
+ httpStatus: http.StatusOK,
streamID: s.id,
contentSubtype: s.contentSubtype,
status: stat,
@@ -613,10 +610,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
defer func() {
close(t.readerDone)
<-t.loopyWriterDone
@@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
}
}
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
t.mu.Lock()
defer t.mu.Unlock()
if t.activeStreams == nil {
@@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
-func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
if w := s.fc.maybeAdjust(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) {
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
+func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
increment: w,
@@ -813,10 +809,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@@ -960,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool {
return true
}
-func (t *http2Server) streamContextErr(s *Stream) error {
+func (t *http2Server) streamContextErr(s *ServerStream) error {
select {
case <-t.done:
return ErrConnClosing
@@ -970,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error {
}
// WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
s.hdrMu.Lock()
defer s.hdrMu.Unlock()
if s.getState() == streamDone {
@@ -1003,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
+func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
@@ -1043,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
// There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
s.hdrMu.Lock()
defer s.hdrMu.Unlock()
@@ -1089,7 +1088,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -1112,27 +1113,38 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
+ reader := data.Reader()
+
if !s.isHeaderSent() { // Headers haven't been written yet.
- if err := t.WriteHeader(s, nil); err != nil {
+ if err := t.writeHeader(s, nil); err != nil {
+ _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
+ _ = reader.Close()
return t.streamContextErr(s)
}
}
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ reader: reader,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ t.incrMsgSent()
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -1223,7 +1235,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
@@ -1261,7 +1273,7 @@ func (t *http2Server) Close(err error) {
}
// deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
+func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
@@ -1282,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
@@ -1306,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
@@ -1400,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
}
}
-func (t *http2Server) IncrMsgSent() {
- t.channelz.SocketMetrics.MessagesSent.Add(1)
- t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+func (t *http2Server) incrMsgSent() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+ }
}
-func (t *http2Server) IncrMsgRecv() {
- t.channelz.SocketMetrics.MessagesReceived.Add(1)
- t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+func (t *http2Server) incrMsgRecv() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+ }
}
func (t *http2Server) getOutFlowWindow() int64 {
@@ -1440,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration {
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
+ j := rand.Int64N(2*r) - r
return time.Duration(j)
}
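The getJitter change above is a drop-in move from the internal grpcrand helper to the standard math/rand/v2 package. A standalone version of the same roughly +/-10% jitter calculation (the guard for an infinite duration present in the full function is omitted here):

package main

import (
	"fmt"
	rand "math/rand/v2"
	"time"
)

// jitter returns a random duration of roughly +/-10% of v, matching the
// spread computed by getJitter above.
func jitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	return time.Duration(rand.Int64N(2*r) - r)
}

func main() {
	base := 2 * time.Hour // illustrative keepalive interval
	fmt.Println(base + jitter(base))
}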
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 39cef3bd4..3613d7b64 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- n, err = w.conn.Write(b)
+ n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.flushKeepBuffer()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
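The rewritten bufWriter.Write accumulates bytes into a fixed-size batch buffer and flushes each time the buffer fills, returning the count written so far if a flush fails. A small self-contained sketch of the same batching loop (batchWriter is an illustrative type, not the gRPC bufWriter):

package main

import (
	"bytes"
	"fmt"
)

// batchWriter mimics the reworked Write logic: bytes are copied into a
// fixed-size buffer, and the buffer is flushed each time it fills up.
type batchWriter struct {
	dst       *bytes.Buffer
	buf       []byte
	offset    int
	batchSize int
}

func (w *batchWriter) Write(b []byte) (int, error) {
	written := 0
	for len(b) > 0 {
		copied := copy(w.buf[w.offset:], b)
		b = b[copied:]
		written += copied
		w.offset += copied
		if w.offset < w.batchSize {
			continue // buffer not full yet, keep accumulating
		}
		// Buffer is full: flush it and reset the offset.
		if _, err := w.dst.Write(w.buf[:w.offset]); err != nil {
			return written, err
		}
		w.offset = 0
	}
	return written, nil
}

func main() {
	w := &batchWriter{dst: &bytes.Buffer{}, buf: make([]byte, 4), batchSize: 4}
	n, _ := w.Write([]byte("hello world"))
	fmt.Println(n, w.dst.String()) // 11 "hello wo" (last partial batch still buffered)
}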
func (w *bufWriter) Flush() error {
@@ -389,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
-var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 24fa10325..d77384595 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -30,34 +30,16 @@ import (
"net/url"
"google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/resolver"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
-var (
- // The following variable will be overwritten in the tests.
- httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(address string) (*url.URL, error) {
- req := &http.Request{
- URL: &url.URL{
- Scheme: "https",
- Host: address,
- },
- }
- url, err := httpProxyFromEnvironment(req)
- if err != nil {
- return nil, err
- }
- return url, nil
-}
-
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's need for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer. bufConn wraps the original net.Conn
+// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
type bufConn struct {
net.Conn
r io.Reader
@@ -72,7 +54,7 @@ func basicAuth(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(auth))
}
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) {
defer func() {
if err != nil {
conn.Close()
@@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
req := &http.Request{
Method: http.MethodConnect,
- URL: &url.URL{Host: backendAddr},
+ URL: &url.URL{Host: opts.ConnectAddr},
Header: map[string][]string{"User-Agent": {grpcUA}},
}
- if t := proxyURL.User; t != nil {
- u := t.Username()
- p, _ := t.Password()
+ if user := opts.User; user != nil {
+ u := user.Username()
+ p, _ := user.Password()
req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
}
-
if err := sendHTTPRequest(ctx, req, conn); err != nil {
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
}
@@ -107,32 +88,23 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
-
- return &bufConn{Conn: conn, r: r}, nil
-}
-
-// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
-// is necessary, dials, does the HTTP CONNECT handshake, and returns the
-// connection.
-func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
- newAddr := addr
- proxyURL, err := mapAddress(addr)
- if err != nil {
- return nil, err
- }
- if proxyURL != nil {
- newAddr = proxyURL.Host
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
}
+ return conn, nil
+}
- conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
+// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
+func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) {
+ conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr)
if err != nil {
return nil, err
}
- if proxyURL == nil {
- // proxy is disabled if proxyURL is nil.
- return conn, err
- }
- return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ return doHTTPConnectHandshake(ctx, conn, grpcUA, opts)
}
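After parsing the proxy's CONNECT response through a bufio.Reader, the handshake now wraps the connection only when the reader still holds buffered bytes; otherwise the raw conn is returned and every subsequent Read avoids the extra indirection. A hedged sketch of that wrap-only-if-buffered idea (bufferedConn is illustrative, not the gRPC bufConn):

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strings"
)

// bufferedConn preserves bytes the bufio.Reader has already pulled off the
// wire by routing Read through the reader instead of the raw connection.
type bufferedConn struct {
	net.Conn
	r io.Reader
}

func (c *bufferedConn) Read(p []byte) (int, error) { return c.r.Read(p) }

// wrapIfBuffered wraps conn only when r actually holds buffered data.
func wrapIfBuffered(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() != 0 {
		return &bufferedConn{Conn: conn, r: r}
	}
	return conn // nothing buffered: skip the wrapper entirely
}

func main() {
	r := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n\r\nextra"))
	// Pretend the status line and the blank header line were consumed already.
	_, _ = r.ReadString('\n')
	_, _ = r.ReadString('\n')
	fmt.Println(r.Buffered() > 0) // true: "extra" is still buffered and must be kept
	_ = wrapIfBuffered(nil, r)
}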
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
new file mode 100644
index 000000000..a22a90151
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -0,0 +1,178 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// ServerStream implements streaming functionality for a gRPC server.
+type ServerStream struct {
+ *Stream // Embed for common stream functionality.
+
+ st internalServerTransport
+ ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
+ cancel context.CancelFunc // invoked at the end of stream to cancel ctx.
+
+ // Holds compressor names passed in grpc-accept-encoding metadata from the
+ // client.
+ clientAdvertisedCompressors string
+ headerWireLength int
+
+ // hdrMu protects outgoing header and trailer metadata.
+ hdrMu sync.Mutex
+ header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
+ headerSent atomic.Bool // atomically set when the headers are sent out.
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
+ b, err := s.Stream.read(n)
+ if err == nil {
+ s.st.incrMsgRecv()
+ }
+ return b, err
+}
+
+// SendHeader sends the header metadata for the given stream.
+func (s *ServerStream) SendHeader(md metadata.MD) error {
+ return s.st.writeHeader(s, md)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+ return s.st.write(s, hdr, data, opts)
+}
+
+// WriteStatus sends the status of a stream to the client. WriteStatus is
+// the final call made on a stream and always occurs.
+func (s *ServerStream) WriteStatus(st *status.Status) error {
+ return s.st.writeStatus(s, st)
+}
+
+// isHeaderSent indicates whether headers have been sent.
+func (s *ServerStream) isHeaderSent() bool {
+ return s.headerSent.Load()
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set.
+func (s *ServerStream) updateHeaderSent() bool {
+ return s.headerSent.Swap(true)
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ServerStream) RecvCompress() string {
+ return s.recvCompress
+}
+
+// SendCompress returns the send compressor name.
+func (s *ServerStream) SendCompress() string {
+ return s.sendCompress
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *ServerStream) ContentSubtype() string {
+ return s.contentSubtype
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *ServerStream) SetSendCompress(name string) error {
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return errors.New("transport: set send compressor called after headers sent or stream done")
+ }
+
+ s.sendCompress = name
+ return nil
+}
+
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *ServerStream) SetContext(ctx context.Context) {
+ s.ctx = ctx
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *ServerStream) ClientAdvertisedCompressors() []string {
+ values := strings.Split(s.clientAdvertisedCompressors, ",")
+ for i, v := range values {
+ values[i] = strings.TrimSpace(v)
+ }
+ return values
+}
+
+// Header returns the header metadata of the stream. It returns the out header
+// after t.WriteHeader is called. It does not block and must not be called
+// until after WriteHeader.
+func (s *ServerStream) Header() (metadata.MD, error) {
+ // Return the header in stream. It will be the out
+ // header after t.WriteHeader is called.
+ return s.header.Copy(), nil
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire.
+func (s *ServerStream) HeaderWireLength() int {
+ return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// This should not be called in parallel to other data writes.
+func (s *ServerStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times.
+// This should not be called parallel to other data writes.
+func (s *ServerStream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.hdrMu.Unlock()
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 4b39c0ade..af4a4aeab 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,13 +22,11 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
"io"
"net"
- "strings"
"sync"
"sync/atomic"
"time"
@@ -37,9 +35,9 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
- "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
@@ -47,32 +45,10 @@ import (
const logLevel = 2
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() any {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
-
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -102,6 +78,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -148,45 +127,70 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readMessageHeaderClient(header)
+ } else {
+ n, r.err = r.readMessageHeader(header)
+ }
+ return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available in
+// recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
r.last = nil
}
- return copied, nil
+ return buf, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ buf, r.err = r.readClient(n)
} else {
- n, r.err = r.read(p)
+ buf, r.err = r.read(n)
}
- return n, r.err
+ return buf, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -207,25 +211,67 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readAdditional(m, n)
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
}
- return copied, nil
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+ }
+
+ return m.buffer, nil
}
type streamState uint32
@@ -240,73 +286,26 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
id uint32
- st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
- ctx context.Context // the associated context of the stream
- cancel context.CancelFunc // always nil for client side Stream
- done chan struct{} // closed at the end of stream to unblock writers. On the client side.
- doneFunc func() // invoked at the end of stream on client side.
- ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
- method string // the associated RPC method of the stream
+ ctx context.Context // the associated context of the stream
+ method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
- // Holds compressor names passed in grpc-accept-encoding metadata from the
- // client. This is empty for the client side stream.
- clientAdvertisedCompressors string
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if needed.
requestRead func(int)
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
- // headerValid indicates whether a valid header was received. Only
- // meaningful after headerChan is closed (always call waitOnHeader() before
- // reading its value). Not valid on server side.
- headerValid bool
- headerWireLength int // Only set on server side.
-
- // hdrMu protects header and trailer metadata on the server-side.
- hdrMu sync.Mutex
- // On client side, header keeps the received header metadata.
- //
- // On server side, header keeps the header set by SetHeader(). The complete
- // header will merged into this after t.WriteHeader() is called.
- header metadata.MD
- trailer metadata.MD // the key-value map of trailer metadata.
-
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
- headerSent uint32
-
state streamState
- // On client-side it is the status error received from the server.
- // On server-side it is unused.
- status *status.Status
-
- bytesReceived uint32 // indicates whether any bytes have been received on this stream
- unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
-
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
-}
-
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
- return atomic.LoadUint32(&s.headerSent) == 1
-}
-// updateHeaderSent updates headerSent and returns true
-// if it was already set. It is valid only on server-side.
-func (s *Stream) updateHeaderSent() bool {
- return atomic.SwapUint32(&s.headerSent, 1) == 1
+ trailer metadata.MD // the key-value map of trailer metadata.
}
func (s *Stream) swapState(st streamState) streamState {
@@ -321,110 +320,12 @@ func (s *Stream) getState() streamState {
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
}
-func (s *Stream) waitOnHeader() {
- if s.headerChan == nil {
- // On the server headerChan is always nil since a stream originates
- // only after having received headers.
- return
- }
- select {
- case <-s.ctx.Done():
- // Close the stream to prevent headers/trailers from changing after
- // this function returns.
- s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
- // headerChan could possibly not be closed yet if closeStream raced
- // with operateHeaders; wait until it is closed explicitly here.
- <-s.headerChan
- case <-s.headerChan:
- }
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is empty string if there is no compression applied.
-func (s *Stream) RecvCompress() string {
- s.waitOnHeader()
- return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *Stream) SetSendCompress(name string) error {
- if s.isHeaderSent() || s.getState() == streamDone {
- return errors.New("transport: set send compressor called after headers sent or stream done")
- }
-
- s.sendCompress = name
- return nil
-}
-
-// SendCompress returns the send compressor name.
-func (s *Stream) SendCompress() string {
- return s.sendCompress
-}
-
-// ClientAdvertisedCompressors returns the compressor names advertised by the
-// client via grpc-accept-encoding header.
-func (s *Stream) ClientAdvertisedCompressors() []string {
- values := strings.Split(s.clientAdvertisedCompressors, ",")
- for i, v := range values {
- values[i] = strings.TrimSpace(v)
- }
- return values
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *Stream) Done() <-chan struct{} {
- return s.done
-}
-
-// Header returns the header metadata of the stream.
-//
-// On client side, it acquires the key-value pairs of header metadata once it is
-// available. It blocks until i) the metadata is ready or ii) there is no header
-// metadata or iii) the stream is canceled/expired.
-//
-// On server side, it returns the out header after t.WriteHeader is called. It
-// does not block and must not be called until after WriteHeader.
-func (s *Stream) Header() (metadata.MD, error) {
- if s.headerChan == nil {
- // On server side, return the header in stream. It will be the out
- // header after t.WriteHeader is called.
- return s.header.Copy(), nil
- }
- s.waitOnHeader()
-
- if !s.headerValid || s.noHeaders {
- return nil, s.status.Err()
- }
-
- return s.header.Copy(), nil
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. Client-side only.
-func (s *Stream) TrailersOnly() bool {
- s.waitOnHeader()
- return s.noHeaders
-}
-
-// Trailer returns the cached trailer metedata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD.
// It can be safely read only after stream has ended that is either read
// or write have returned io.EOF.
func (s *Stream) Trailer() metadata.MD {
- c := s.trailer.Copy()
- return c
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
- return s.contentSubtype
+ return s.trailer.Copy()
}
// Context returns the context of the stream.
@@ -432,114 +333,104 @@ func (s *Stream) Context() context.Context {
return s.ctx
}
-// SetContext sets the context of the stream. This will be deleted once the
-// stats handler callouts all move to gRPC layer.
-func (s *Stream) SetContext(ctx context.Context) {
- s.ctx = ctx
-}
-
// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
}
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *Stream) Status() *status.Status {
- return s.status
-}
-
-// HeaderWireLength returns the size of the headers of the stream as received
-// from the wire. Valid only on the server.
-func (s *Stream) HeaderWireLength() int {
- return s.headerWireLength
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// Server side only.
-// This should not be called in parallel to other data writes.
-func (s *Stream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
- return s.st.WriteHeader(s, md)
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(m)
}
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can be called multiple times. Server side only.
-// This should not be called parallel to other data writes.
-func (s *Stream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
+// ReadMessageHeader reads data into the provided header slice from the stream.
+// It first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered with
+// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
+// unexpected end of the stream. The method returns any error encountered during
+// the read process or nil if the header was successfully read.
+func (s *Stream) ReadMessageHeader(header []byte) (err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return er
}
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadMessageHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
}
- s.hdrMu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
return nil
}
-func (s *Stream) write(m recvMsg) {
- s.buf.put(m)
-}
-
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return nil, er
+ }
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ return data, nil
}
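Stream.read now gathers mem.Buffers in a loop until exactly n bytes have been collected, treating an EOF that arrives after partial progress as io.ErrUnexpectedEOF. A generic sketch of that accumulation rule under an assumed chunk-source callback (readExactly and next are illustrative, not transport APIs):

package main

import (
	"errors"
	"fmt"
	"io"
)

// readExactly gathers exactly n bytes from a chunked source: an EOF exactly at
// the boundary is not an error, while partial progress followed by io.EOF is
// surfaced as io.ErrUnexpectedEOF, mirroring the loop in Stream.read.
func readExactly(next func(n int) ([]byte, error), n int) ([]byte, error) {
	var data []byte
	for n != 0 {
		chunk, err := next(n)
		n -= len(chunk)
		if n == 0 {
			err = nil // we have everything we asked for; ignore a trailing EOF
		}
		if err != nil {
			if len(chunk) > 0 && errors.Is(err, io.EOF) {
				err = io.ErrUnexpectedEOF // data followed by EOF means truncation
			}
			return nil, err
		}
		data = append(data, chunk...)
	}
	return data, nil
}

func main() {
	chunks := [][]byte{[]byte("ab"), []byte("c")}
	i := 0
	next := func(int) ([]byte, error) {
		if i == len(chunks) {
			return nil, io.EOF
		}
		c := chunks[i]
		i++
		return c, nil
	}
	got, err := readExactly(next, 3)
	fmt.Println(string(got), err) // abc <nil>
	_, err = readExactly(next, 2)
	fmt.Println(err) // EOF (no data at all was read)
}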
-// tranportReader reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadMessageHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
}
t.windowHandler(n)
- return
+ return n, nil
}
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
- return atomic.LoadUint32(&s.bytesReceived) == 1
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
- return atomic.LoadUint32(&s.unprocessed) == 1
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
+ }
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// GoString is implemented by Stream so context.String() won't
@@ -574,6 +465,7 @@ type ServerConfig struct {
ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
+ BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -610,19 +502,13 @@ type ConnectOptions struct {
ChannelzParent *channelz.SubChannel
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
- // UseProxy specifies if a proxy should be used.
- UseProxy bool
-}
-
-// NewClientTransport establishes the transport with the required ConnectOptions
-// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
}
-// Options provides additional hints and information for message
+// WriteOptions provides additional hints and information for message
// transmission.
-type Options struct {
+type WriteOptions struct {
// Last indicates whether this write is the last piece for
// this stream.
Last bool
@@ -671,18 +557,8 @@ type ClientTransport interface {
// It does not block.
GracefulClose()
- // Write sends the data for the given stream. A nil stream indicates
- // the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
// NewStream creates a Stream for an RPC.
- NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
- // CloseStream clears the footprint of a stream when the stream is
- // not needed any more. The err indicates the error incurred when
- // CloseStream is called. Must be called when a stream is finished
- // unless the associated transport is closing.
- CloseStream(stream *Stream, err error)
+ NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
// Error returns a channel that is closed when some I/O error
// happens. Typically the caller should have a goroutine to monitor
@@ -702,12 +578,6 @@ type ClientTransport interface {
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
-
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
}
// ServerTransport is the common interface for all gRPC server-side transport
@@ -717,19 +587,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(context.Context, func(*Stream))
-
- // WriteHeader sends the header metadata for the given stream.
- // WriteHeader may not be called on all streams.
- WriteHeader(s *Stream, md metadata.MD) error
-
- // Write sends the data for the given stream.
- // Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
- // WriteStatus sends the status of a stream to the client. WriteStatus is
- // the final call made on a stream and always occurs.
- WriteStatus(s *Stream, st *status.Status) error
+ HandleStreams(context.Context, func(*ServerStream))
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
@@ -741,12 +599,14 @@ type ServerTransport interface {
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain(debugData string)
+}
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
+type internalServerTransport interface {
+ ServerTransport
+ writeHeader(s *ServerStream, md metadata.MD) error
+ write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
+ writeStatus(s *ServerStream, st *status.Status) error
+ incrMsgRecv()
}
// connectionErrorf creates an ConnectionError with the specified error description.
@@ -798,7 +658,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
+ // errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5e7..eb42b19fb 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
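The expanded ClientParameters comments spell out how the client's Time interacts with the server's EnforcementPolicy.MinTime. A minimal dial sketch showing how these fields are typically set (the target address is a placeholder):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Keep Time at or above the server's enforcement policy MinTime (5 minutes
	// by default) to avoid "too_many_pings" GOAWAYs; values below 10s are
	// bumped to 10s by the client anyway.
	kp := keepalive.ClientParameters{
		Time:                5 * time.Minute,  // ping after 5 minutes of inactivity
		Timeout:             20 * time.Second, // wait 20s for the ping ack before closing
		PermitWithoutStream: false,            // only ping while RPCs are active
	}

	// "localhost:50051" is a placeholder target for illustration.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}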
// ServerParameters is used to set keepalive and max-age parameters on the
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 000000000..c37c58c02
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewBufferPool that uses a set of default sizes optimized for
+// expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
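getPool uses sort.Search to pick the smallest sized tier whose defaultSize can hold the request, falling back to the simple pool for anything larger than the biggest tier. A short usage sketch against the exported pool API (the tier sizes chosen here are arbitrary examples):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// A tiered pool with 1KiB and 8KiB tiers; requests above 8KiB fall back
	// to the simple (unsized) pool.
	pool := mem.NewTieredBufferPool(1<<10, 8<<10)

	buf := pool.Get(3000) // served by the 8KiB tier, returned with len 3000
	fmt.Println(len(*buf), cap(*buf) >= 3000)

	// Returning the buffer lets the next Get of a similar size reuse it.
	pool.Put(buf)

	big := pool.Get(64 << 10) // larger than every tier: falls back to the simple pool
	fmt.Println(len(*big))
	pool.Put(big)
}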
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 000000000..65002e2cc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,281 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+const (
+ // 32 KiB is what io.Copy uses.
+ readAllBufSize = 32 * 1024
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended not to interact with any of the underlying
+// buffers directly; rather, such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
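The warning above is easy to trip over, so here is a tiny illustration of the difference between the built-in len and BufferSlice.Len (buffer contents are arbitrary):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Two buffers of 3 and 5 bytes; SliceBuffer is the non-pooled Buffer
	// implementation, so no Free bookkeeping is needed in this tiny example.
	s := mem.BufferSlice{
		mem.SliceBuffer([]byte("abc")),
		mem.SliceBuffer([]byte("defgh")),
	}

	fmt.Println(len(s))                  // 2: number of buffers in the slice
	fmt.Println(s.Len())                 // 8: total number of bytes across all buffers
	fmt.Println(string(s.Materialize())) // "abcdefgh"
}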
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice; clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is
+// added to the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+//
+// Important: A failed call returns a non-nil error and may also return
+// partially read buffers. It is the responsibility of the caller to free the
+// BufferSlice returned, or its memory will not be reused.
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
+ var result BufferSlice
+ if wt, ok := r.(io.WriterTo); ok {
+ // This is more optimal since wt knows the size of chunks it wants to
+ // write and, hence, we can allocate buffers of an optimal size to fit
+ // them. E.g. it might be a single big chunk, and we wouldn't chop it
+ // into pieces.
+ w := NewWriter(&result, pool)
+ _, err := wt.WriteTo(w)
+ return result, err
+ }
+nextBuffer:
+ for {
+ buf := pool.Get(readAllBufSize)
+ // We asked for 32KiB but may have been given a bigger buffer.
+ // Use all of it if that's the case.
+ *buf = (*buf)[:cap(*buf)]
+ usedCap := 0
+ for {
+ n, err := r.Read((*buf)[usedCap:])
+ usedCap += n
+ if err != nil {
+ if usedCap == 0 {
+ // Nothing in this buf, put it back
+ pool.Put(buf)
+ } else {
+ *buf = (*buf)[:usedCap]
+ result = append(result, NewBuffer(buf, pool))
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return result, err
+ }
+ if len(*buf) == usedCap {
+ result = append(result, NewBuffer(buf, pool))
+ continue nextBuffer
+ }
+ }
+ }
+}
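ReadAll hands ownership of the returned BufferSlice to the caller, who must Free it so pooled buffers can be reused; it also takes the io.WriterTo fast path when the source supports it (strings.Reader does). A minimal usage sketch:

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	src := strings.NewReader("some payload read into pooled buffers")

	data, err := mem.ReadAll(src, mem.DefaultBufferPool())
	if err != nil {
		panic(err)
	}
	// The caller owns the returned BufferSlice; freeing it releases any
	// pooled buffers back to the pool for reuse.
	defer data.Free()

	fmt.Println(data.Len())                 // total byte count (37 here)
	fmt.Println(string(data.Materialize())) // contiguous copy of the payload
}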
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 000000000..ecbf0b9a7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ // Use the buffer's capacity instead of the length, otherwise buffers may
+ // not be reused under certain conditions. For example, if a large buffer
+ // is acquired from the pool, but fewer bytes than the buffering threshold
+ // are written to it, the buffer will not be returned to the pool.
+ if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
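NewBuffer and Copy return a plain SliceBuffer for payloads at or below the pooling threshold and a pooled, reference-counted buffer otherwise; callers share data by taking references and release them with Free. A short sketch of that ownership model (sizes are arbitrary examples):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Small payloads come back as plain SliceBuffers; large ones are copied
	// into a pooled, reference-counted buffer.
	small := mem.Copy([]byte("tiny"), pool)
	large := mem.Copy(make([]byte, 64<<10), pool)

	fmt.Println(small.Len(), large.Len()) // 4 65536

	// Each consumer of the data takes its own reference; the backing []byte
	// goes back to the pool only when the last reference is freed.
	large.Ref()
	large.Free() // drops the extra reference
	large.Free() // drops the original reference; buffer returns to the pool

	small.Free() // no-op for SliceBuffer, but keeps ownership rules uniform
}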
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given Buffer to point to the first n bytes while
+// returning a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
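
The hunk above introduces the reference-counted mem.Buffer API. The short usage sketch below is illustrative only and assumes nothing beyond the exported identifiers visible in this diff (mem.Copy, mem.DefaultBufferPool, ReadOnlyData, Ref, Free, Len); it is not code from the vendored package.

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Copy acquires a []byte from the pool, copies the payload into it and
	// sets the reference count to 1. Payloads at or below the pooling
	// threshold come back as a plain SliceBuffer instead of a pooled buffer.
	payload := make([]byte, 4096)
	buf := mem.Copy(payload, mem.DefaultBufferPool())

	buf.Ref() // a second holder takes its own reference
	fmt.Println("length:", buf.Len(), "bytes:", len(buf.ReadOnlyData()))

	buf.Free() // drops the second reference; the data is still valid
	buf.Free() // drops the last reference; the []byte returns to the pool
}

Each holder pairs its Ref with exactly one Free; the backing slice is only recycled when the count reaches zero.
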
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6..d2e15253b 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // Case insenitive comparison: MD is a map, and there's no guarantee
+ // Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 56e8aba78..a2d2a798d 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -22,7 +22,7 @@ import (
"context"
"fmt"
"io"
- "sync"
+ "sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
@@ -33,35 +33,43 @@ import (
"google.golang.org/grpc/status"
)
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+ // picker is the picker produced by the LB policy. May be nil if a picker
+ // has never been produced.
+ picker balancer.Picker
+ // blockingCh is closed when the picker has been invalidated because there
+ // is a new one available.
+ blockingCh chan struct{}
+}
+
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblock when there's a picker update.
type pickerWrapper struct {
- mu sync.Mutex
- done bool
- blockingCh chan struct{}
- picker balancer.Picker
+ // If pickerGen holds a nil pointer, the pickerWrapper is closed.
+ pickerGen atomic.Pointer[pickerGeneration]
statsHandlers []stats.Handler // to record blocking picker calls
}
func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
- return &pickerWrapper{
- blockingCh: make(chan struct{}),
+ pw := &pickerWrapper{
statsHandlers: statsHandlers,
}
+ pw.pickerGen.Store(&pickerGeneration{
+ blockingCh: make(chan struct{}),
+ })
+ return pw
}
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
+// updatePicker is called when the LB policy calls UpdateState. It
+// unblocks all blocked picks.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
- pw.mu.Lock()
- if pw.done {
- pw.mu.Unlock()
- return
- }
- pw.picker = p
- // pw.blockingCh should never be nil.
- close(pw.blockingCh)
- pw.blockingCh = make(chan struct{})
- pw.mu.Unlock()
+ old := pw.pickerGen.Swap(&pickerGeneration{
+ picker: p,
+ blockingCh: make(chan struct{}),
+ })
+ close(old.blockingCh)
}
// doneChannelzWrapper performs the following:
@@ -98,27 +106,24 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
var lastPickErr error
for {
- pw.mu.Lock()
- if pw.done {
- pw.mu.Unlock()
+ pg := pw.pickerGen.Load()
+ if pg == nil {
return nil, balancer.PickResult{}, ErrClientConnClosing
}
-
- if pw.picker == nil {
- ch = pw.blockingCh
+ if pg.picker == nil {
+ ch = pg.blockingCh
}
- if ch == pw.blockingCh {
+ if ch == pg.blockingCh {
// This could happen when either:
// - pw.picker is nil (the previous if condition), or
- // - has called pick on the current picker.
- pw.mu.Unlock()
+ // - we have already called pick on the current picker.
select {
case <-ctx.Done():
var errStr string
if lastPickErr != nil {
errStr = "latest balancer error: " + lastPickErr.Error()
} else {
- errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
+ errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err())
}
switch ctx.Err() {
case context.DeadlineExceeded:
@@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
}
- ch = pw.blockingCh
- p := pw.picker
- pw.mu.Unlock()
+ ch = pg.blockingCh
+ p := pg.picker
pickResult, err := p.Pick(info)
if err != nil {
@@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
func (pw *pickerWrapper) close() {
- pw.mu.Lock()
- defer pw.mu.Unlock()
- if pw.done {
- return
- }
- pw.done = true
- close(pw.blockingCh)
+ old := pw.pickerGen.Swap(nil)
+ close(old.blockingCh)
}
// reset clears the pickerWrapper and prepares it for being used again when idle
// mode is exited.
func (pw *pickerWrapper) reset() {
- pw.mu.Lock()
- defer pw.mu.Unlock()
- if pw.done {
- return
- }
- pw.blockingCh = make(chan struct{})
+ old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+ close(old.blockingCh)
}
// dropError is a wrapper error that indicates the LB policy wishes to drop the
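
The picker_wrapper change above replaces a mutex-guarded picker/channel pair with a single atomically swapped pickerGeneration. The standalone sketch below (not grpc code, no grpc imports assumed) shows the same pattern with sync/atomic.Pointer: readers load the current generation, and an update swaps in a new one and closes the old blocking channel to wake any waiters.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// generation pairs a value with a channel that is closed when a newer
// generation replaces it, mirroring pickerGeneration in the diff above.
type generation struct {
	value      string
	blockingCh chan struct{}
}

type wrapper struct {
	gen atomic.Pointer[generation]
}

func newWrapper() *wrapper {
	w := &wrapper{}
	w.gen.Store(&generation{blockingCh: make(chan struct{})})
	return w
}

// update publishes a new value and wakes everyone blocked on the old one.
func (w *wrapper) update(v string) {
	old := w.gen.Swap(&generation{value: v, blockingCh: make(chan struct{})})
	close(old.blockingCh)
}

// wait blocks until a generation with a non-empty value is available.
func (w *wrapper) wait() string {
	for {
		g := w.gen.Load()
		if g.value != "" {
			return g.value
		}
		<-g.blockingCh // closed when a newer generation is swapped in
	}
}

func main() {
	w := newWrapper()
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.update("ready")
	}()
	fmt.Println(w.wait())
}

Swapping a whole generation keeps the picker and its invalidation channel consistent without holding a lock across Pick calls.
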
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 73bd63364..ee0ff969a 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
import (
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -31,9 +32,10 @@ import (
// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
- encodedData []byte
+ encodedData mem.BufferSlice
hdr []byte
- payload []byte
+ payload mem.BufferSlice
+ pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
if err != nil {
return err
}
- p.encodedData = data
- compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+ materializedData := data.Materialize()
+ data.Free()
+ p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
+
+ // TODO: it should be possible to grab the bufferPool from the underlying
+ // stream implementation with a type cast to its actual type (such as
+ // addrConnStream) and access the buffer pool directly.
+ var compData mem.BufferSlice
+ compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
- p.hdr, p.payload = msgHeader(data, compData)
+
+ if p.pf.isCompressed() {
+ materializedCompData := compData.Materialize()
+ compData.Free()
+ compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
+ }
+
+ p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
return nil
}
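
The Encode change above copies pooled BufferSlices into plain, unpooled ones by materializing their contents, because a PreparedMsg may be held long after the pooled buffers would otherwise be recycled. A minimal sketch of that pattern, using only the mem identifiers that appear in this diff (Materialize, Free, BufferSlice, SliceBuffer); the detach helper is hypothetical, not part of the vendored package.

package preload

import "google.golang.org/grpc/mem"

// detach copies a possibly pooled BufferSlice into a single unpooled buffer
// and frees the original, so the result can safely outlive the pool's reuse.
func detach(bs mem.BufferSlice) mem.BufferSlice {
	flat := bs.Materialize() // copies all segments into one []byte
	bs.Free()                // returns pooled segments to their pool
	return mem.BufferSlice{mem.SliceBuffer(flat)}
}
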
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
deleted file mode 100644
index 3edca296c..000000000
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-# Copyright 2020 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eu -o pipefail
-
-WORKDIR=$(mktemp -d)
-
-function finish {
- rm -rf "$WORKDIR"
-}
-trap finish EXIT
-
-export GOBIN=${WORKDIR}/bin
-export PATH=${GOBIN}:${PATH}
-mkdir -p ${GOBIN}
-
-echo "remove existing generated files"
-# grpc_testing_not_regenerate/*.pb.go is not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
-
-echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
-(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
-
-echo "go install cmd/protoc-gen-go-grpc"
-(cd cmd/protoc-gen-go-grpc && go install .)
-
-echo "git clone https://github.com/grpc/grpc-proto"
-git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
-
-echo "git clone https://github.com/protocolbuffers/protobuf"
-git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
-
-# Pull in code.proto as a proto dependency
-mkdir -p ${WORKDIR}/googleapis/google/rpc
-echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
-curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
-
-mkdir -p ${WORKDIR}/out
-
-# Generates sources without the embed requirement
-LEGACY_SOURCES=(
- ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
- ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
- ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
- ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
- profiling/proto/service.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
-)
-
-# Generates only the new gRPC Service symbols
-SOURCES=(
- $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
- ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
- ${WORKDIR}/grpc-proto/grpc/testing/*.proto
- ${WORKDIR}/grpc-proto/grpc/core/*.proto
-)
-
-# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
-# import path of 'bar' in the generated code when 'foo.proto' is imported in
-# one of the sources.
-#
-# Note that the protos listed here are all for testing purposes. All protos to
-# be used externally should have a go_package option (and they don't need to be
-# listed here).
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
-Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
-
-for src in ${SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-for src in ${LEGACY_SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
-# current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-
-# grpc_testing_not_regenerate/*.pb.go are not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
-
-cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
index f2efa2a2c..09e864a89 100644
--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) {
// Build returns itself for Resolver, because it's both a builder and a resolver.
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
- r.BuildCallback(target, cc, opts)
r.mu.Lock()
defer r.mu.Unlock()
+ // Call BuildCallback after locking to avoid a race when UpdateState
+ // or ReportError is called before Build returns.
+ r.BuildCallback(target, cc, opts)
r.CC = cc
if r.lastSeenState != nil {
err := r.CC.UpdateState(*r.lastSeenState)
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 202854511..b84ef26d4 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -22,6 +22,7 @@ package resolver
import (
"context"
+ "errors"
"fmt"
"net"
"net/url"
@@ -29,6 +30,7 @@ import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
)
@@ -174,6 +176,8 @@ type BuildOptions struct {
// Authority is the effective authority of the clientconn for which the
// resolver is built.
Authority string
+ // MetricsRecorder is used by the resolver to record metrics.
+ MetricsRecorder stats.MetricsRecorder
}
// An Endpoint is one network endpoint, or server, which may have multiple
@@ -237,8 +241,8 @@ type ClientConn interface {
// UpdateState can be omitted.
UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
- // error. The ClientConn will notify the load balancer and begin calling
- // ResolveNow on the Resolver with exponential backoff.
+ // error. The ClientConn then forwards this error to the load balancing
+ // policy.
ReportError(error)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
@@ -330,3 +334,20 @@ type AuthorityOverrider interface {
// typically in line, and must keep it unchanged.
OverrideAuthority(Target) string
}
+
+// ValidateEndpoints validates endpoints from a petiole policy's perspective.
+// Petiole policies should call this before calling into their children. See
+// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
+// for details.
+func ValidateEndpoints(endpoints []Endpoint) error {
+ if len(endpoints) == 0 {
+ return errors.New("endpoints list is empty")
+ }
+
+ for _, endpoint := range endpoints {
+ for range endpoint.Addresses {
+ return nil
+ }
+ }
+ return errors.New("endpoints list contains no addresses")
+}
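
The new resolver.ValidateEndpoints helper rejects both an empty endpoint list and a list whose endpoints carry no addresses. A short usage sketch, assuming only the exported resolver.Endpoint and resolver.Address types from the public resolver API:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// No endpoints at all: rejected.
	fmt.Println(resolver.ValidateEndpoints(nil))

	// Endpoints present but none has an address: rejected.
	fmt.Println(resolver.ValidateEndpoints([]resolver.Endpoint{{}}))

	// At least one endpoint with at least one address: accepted (nil error).
	fmt.Println(resolver.ValidateEndpoints([]resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
	}))
}
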
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 9dcc9780f..945e24ff8 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -26,6 +26,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/internal/resolver/delegatingresolver"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -66,7 +67,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
// any newly created ccResolverWrapper, except that close may be called instead.
func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
return
}
@@ -76,16 +77,26 @@ func (ccr *ccResolverWrapper) start() error {
CredsBundle: ccr.cc.dopts.copts.CredsBundle,
Dialer: ccr.cc.dopts.copts.Dialer,
Authority: ccr.cc.authority,
+ MetricsRecorder: ccr.cc.metricsRecorderList,
}
var err error
- ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ // The delegating resolver is used unless:
+ //  - a custom dialer is provided via the WithContextDialer DialOption, or
+ //  - proxy usage is disabled through the WithNoProxy DialOption.
+ // In these cases, the resolver is built directly from the target's scheme
+ // using the appropriate resolver builder.
+ if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy {
+ ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ } else {
+ ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution)
+ }
errCh <- err
})
return <-errCh
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccr.resolver == nil {
return
}
@@ -102,7 +113,7 @@ func (ccr *ccResolverWrapper) close() {
ccr.closed = true
ccr.mu.Unlock()
- ccr.serializer.Schedule(func(context.Context) {
+ ccr.serializer.TrySchedule(func(context.Context) {
if ccr.resolver == nil {
return
}
@@ -171,12 +182,15 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
// ParseServiceConfig is called by resolver implementations to parse a JSON
// representation of the service config.
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON)
+ return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
}
// addChannelzTraceEvent adds a channelz trace event containing the new
// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ if !logger.V(0) && !channelz.IsOn() {
+ return
+ }
var updates []string
var oldSC, newSC *ServiceConfig
var oldOK, newOK bool
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index fdd49e6e9..a8ddb0af5 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,7 +19,6 @@
package grpc
import (
- "bytes"
"compress/gzip"
"context"
"encoding/binary"
@@ -35,6 +34,7 @@ import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -151,7 +151,7 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- compressorType string
+ compressorName string
failFast bool
maxReceiveMessageSize *int
maxSendMessageSize *int
@@ -220,9 +220,9 @@ type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
- *o.HeaderAddr, _ = attempt.s.Header()
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
+ *o.HeaderAddr, _ = attempt.transportStream.Header()
}
// Trailer returns a CallOptions that retrieves the trailer metadata
@@ -242,9 +242,9 @@ type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
- *o.TrailerAddr = attempt.s.Trailer()
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
+ *o.TrailerAddr = attempt.transportStream.Trailer()
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -264,24 +264,20 @@ type PeerCallOption struct {
PeerAddr *peer.Peer
}
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
- if x, ok := peer.FromContext(attempt.s.Context()); ok {
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
+ if x, ok := peer.FromContext(attempt.transportStream.Context()); ok {
*o.PeerAddr = *x
}
}
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false and the
-// connection is in the TRANSIENT_FAILURE state, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
//
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
@@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
@@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
return nil
}
-func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
@@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
@@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@@ -439,10 +435,10 @@ type CompressorCallOption struct {
}
func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorType = o.CompressorType
+ c.compressorName = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
// ForceCodec returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
@@ -515,10 +511,50 @@ type ForceCodecCallOption struct {
}
func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV1Bridge(o.Codec)
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+ return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+ CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+ c.codec = o.CodecV2
+ return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -540,10 +576,10 @@ type CustomCodecCallOption struct {
}
func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV0Bridge(o.Codec)
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
@@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -581,19 +617,28 @@ const (
compressionMade payloadFormat = 1 // compressed
)
+func (pf payloadFormat) isCompressed() bool {
+ return pf == compressionMade
+}
+
+type streamReader interface {
+ ReadMessageHeader(header []byte) error
+ Read(n int) (mem.BufferSlice, error)
+}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
- r io.Reader
+ r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
- // recvBufferPool is the pool of shared receive buffers.
- recvBufferPool SharedBufferPool
+ // bufferPool is the pool of shared receive buffers.
+ bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -608,39 +653,38 @@ type parser struct {
// - an error from the status package
//
// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+ err := p.r.ReadMessageHeader(p.header[:])
+ if err != nil {
return 0, nil, err
}
- pf = payloadFormat(p.header[0])
+ pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
- if length == 0 {
- return pf, nil, nil
- }
if int64(length) > int64(maxInt) {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
}
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- msg = p.recvBufferPool.Get(int(length))
- if _, err := p.r.Read(msg); err != nil {
+
+ data, err := p.r.Read(int(length))
+ if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
- return pf, msg, nil
+ return pf, data, nil
}
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
-func encode(c baseCodec, msg any) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@@ -648,8 +692,9 @@ func encode(c baseCodec, msg any) ([]byte, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(len(b)) > math.MaxUint32 {
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+ if bufSize := uint(b.Len()); bufSize > math.MaxUint32 {
+ b.Free()
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize)
}
return b, nil
}
@@ -659,34 +704,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- if len(in) == 0 {
- return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+ if (compressor == nil && cp == nil) || in.Len() == 0 {
+ return nil, compressionNone, nil
}
+ var out mem.BufferSlice
+ w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
+ out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
- cbuf := &bytes.Buffer{}
if compressor != nil {
- z, err := compressor.Compress(cbuf)
+ z, err := compressor.Compress(w)
if err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ for _, b := range in {
+ if _, err := z.Write(b.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
+ }
}
if err := z.Close(); err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
} else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ // This is obviously really inefficient since it fully materializes the data, but
+ // there is no way around this with the old Compressor API. At least it attempts
+ // to return the buffer to the provider, in the hopes it can be reused (maybe
+ // even by a subsequent call to this very function).
+ buf := in.MaterializeToBuffer(pool)
+ defer buf.Free()
+ if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
}
}
- return cbuf.Bytes(), nil
+ return out, compressionMade, nil
}
const (
@@ -697,33 +749,36 @@ const (
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ hdr[0] = byte(pf)
+
+ var length uint32
+ if pf.isCompressed() {
+ length = uint32(compData.Len())
+ payload = compData
} else {
- hdr[0] = byte(compressionNone)
+ length = uint32(data.Len())
+ payload = data
}
// Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
+ binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+ return hdr, payload
}
-func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- CompressedLength: len(payload),
+ Length: dataLength,
+ WireLength: payloadLength + headerLen,
+ CompressedLength: payloadLength,
SentTime: t,
}
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@@ -731,7 +786,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if isServer {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
+ return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -741,104 +799,119 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
type payloadInfo struct {
compressedLength int // The compressed length got from wire.
- uncompressedBytes []byte
+ uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+ if p != nil && p.uncompressedBytes != nil {
+ p.uncompressedBytes.Free()
+ }
}
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as
// the buffer is no longer needed.
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
-) (uncompressedBuf []byte, cancel func(), err error) {
- pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+ pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return nil, nil, st.Err()
+ compressedLength := compressed.Len()
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+ compressed.Free()
+ return nil, st.Err()
}
- var size int
- if pf == compressionMade {
+ if pf.isCompressed() {
+ defer compressed.Free()
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
- if dc != nil {
- uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
- size = len(uncompressedBuf)
- } else {
- uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
- }
+ out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool)
if err != nil {
- return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
- }
- if size > maxReceiveMessageSize {
- // TODO: Revisit the error code. Currently keep it consistent with java
- // implementation.
- return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ return nil, err
}
} else {
- uncompressedBuf = compressedBuf
+ out = compressed
}
if payInfo != nil {
- payInfo.compressedLength = len(compressedBuf)
- payInfo.uncompressedBytes = uncompressedBuf
+ payInfo.compressedLength = compressedLength
+ out.Ref()
+ payInfo.uncompressedBytes = out
+ }
- cancel = func() {}
- } else {
- cancel = func() {
- p.recvBufferPool.Put(&compressedBuf)
+ return out, nil
+}
+
+// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor.
+// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data
+// does not exceed the specified maximum size and returns an error if this limit is exceeded.
+// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit.
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) {
+ if dc != nil {
+ uncompressed, err := dc.Do(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
+ if len(uncompressed) > maxReceiveMessageSize {
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize)
+ }
+ return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil
}
+ if compressor != nil {
+ dcReader, err := compressor.Decompress(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
+ }
- return uncompressedBuf, cancel, nil
-}
+ out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool)
+ if err != nil {
+ out.Free()
+ return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
+ }
-// Using compressor, decompress d, returning data and size.
-// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
- if err != nil {
- return nil, 0, err
- }
- if sizer, ok := compressor.(interface {
- DecompressedSize(compressedBytes []byte) int
- }); ok {
- if size := sizer.DecompressedSize(d); size >= 0 {
- if size > maxReceiveMessageSize {
- return nil, size, nil
- }
- // size is used as an estimate to size the buffer, but we
- // will read more data if available.
- // +MinRead so ReadFrom will not reallocate if size is correct.
- //
- // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
- // we can also utilize the recv buffer pool here.
- buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
- bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return buf.Bytes(), int(bytesRead), err
+ if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) {
+ out.Free()
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
}
+ return out, nil
}
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return d, len(d), err
+ return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
+}
+
+// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF.
+func atEOF(dcReader io.Reader) bool {
+ n, err := dcReader.Read(make([]byte, 1))
+ return n == 0 && err == io.EOF
+}
+
+type recvCompressor interface {
+ RecvCompress() string
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+ data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
- defer cancel()
- if err := c.Unmarshal(buf, m); err != nil {
+ // If the codec wants its own reference to the data, it can get it. Otherwise, always
+ // free the buffers.
+ defer data.Free()
+
+ if err := c.Unmarshal(data, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
+
return nil
}
@@ -941,7 +1014,7 @@ func setCallInfoCodec(c *callInfo) error {
// encoding.Codec (Name vs. String method name). We only support
// setting content subtype from encoding.Codec to avoid a behavior
// change with the deprecated version.
- if ec, ok := c.codec.(encoding.Codec); ok {
+ if ec, ok := c.codec.(encoding.CodecV2); ok {
c.contentSubtype = strings.ToLower(ec.Name())
}
}
@@ -950,12 +1023,12 @@ func setCallInfoCodec(c *callInfo) error {
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
+ c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
+ c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
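
msgHeader above now takes BufferSlices plus an explicit payloadFormat, but the on-the-wire framing it produces is unchanged: a one-byte compressed flag followed by a big-endian uint32 payload length, per the gRPC HTTP/2 protocol document referenced in the code. A standalone sketch of that 5-byte prefix (not the vendored function itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// frame builds the 5-byte gRPC message prefix: one byte for the compressed
// flag followed by the payload length as a big-endian uint32.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return hdr
}

func main() {
	hdr := frame([]byte("hello"), false)
	fmt.Printf("% x\n", hdr) // 00 00 00 00 05
}
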
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 89f8e4792..976e70ae0 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -37,14 +37,17 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -80,18 +83,22 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
- internal.RecvBufferPool = recvBufferPool
+ internal.BufferPool = bufferPool
+ internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
+ return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
+ }
}
var statusOK = status.New(codes.OK, "")
var logger = grpclog.Component("core")
-type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
+// MethodHandler is a function type that processes a unary RPC method call.
+type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
// MethodDesc represents an RPC service's method specification.
type MethodDesc struct {
MethodName string
- Handler methodHandler
+ Handler MethodHandler
}
// ServiceDesc represents an RPC service's specification.
@@ -170,7 +177,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
- recvBufferPool SharedBufferPool
+ bufferPool mem.BufferPool
waitForHandlers bool
}
@@ -181,7 +188,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
- recvBufferPool: nopBufferPool{},
+ bufferPool: mem.DefaultBufferPool(),
}
var globalServerOptions []ServerOption
@@ -313,7 +320,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV0Bridge(codec)
})
}
@@ -342,7 +349,22 @@ func CustomCodec(codec Codec) ServerOption {
// later release.
func ForceServerCodec(codec encoding.Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV1Bridge(codec)
+ })
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codecV2
})
}
@@ -592,26 +614,9 @@ func WaitForHandlers(w bool) ServerOption {
})
}
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
- return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.recvBufferPool = bufferPool
+ o.bufferPool = bufferPool
})
}
@@ -622,8 +627,8 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
-// data to be fed by serveStreams. This allows multiple requests to be
+// serverWorker blocks on a *transport.ServerStream channel forever and waits
+// for data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
//
@@ -643,7 +648,7 @@ func (s *Server) serverWorker() {
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
s.serverWorkerChannel = make(chan func())
- s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+ s.serverWorkerChannelClose = sync.OnceFunc(func() {
close(s.serverWorkerChannel)
})
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
@@ -980,6 +985,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
+ BufferPool: s.opts.bufferPool,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@@ -1020,7 +1026,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
}()
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
- st.HandleStreams(ctx, func(stream *transport.Stream) {
+ st.HandleStreams(ctx, func(stream *transport.ServerStream) {
s.handlersWG.Add(1)
streamQuota.acquire()
f := func() {
@@ -1072,7 +1078,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1136,26 +1142,41 @@ func (s *Server) incrCallsFailed() {
s.channelz.ServerMetrics.CallsFailed.Add(1)
}
-func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
- compData, err := compress(data, cp, comp)
+
+ compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
+ data.Free()
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
+
+ hdr, payload := msgHeader(data, compData, pf)
+
+ defer func() {
+ compData.Free()
+ data.Free()
+ // payload does not need to be freed here; it is either data or compData,
+ // both of which are already freed.
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if payloadLen > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
- err = t.Write(stream, hdr, payload, opts)
+ err = stream.Write(hdr, payload, opts)
if err == nil {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+ if len(s.opts.statsHandlers) != 0 {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+ }
}
}
return err
@@ -1197,7 +1218,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
}
-func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
shs := s.opts.statsHandlers
if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
@@ -1305,7 +1326,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
decomp = encoding.GetCompressor(rc)
if decomp == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(stream, st)
+ stream.WriteStatus(st)
return st.Err()
}
}
@@ -1334,37 +1355,42 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+ d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
- if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+ if e := stream.WriteStatus(status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
- if channelz.IsOn() {
- t.IncrMsgRecv()
+ freed := false
+ dataFree := func() {
+ if !freed {
+ d.Free()
+ freed = true
+ }
}
+ defer dataFree()
df := func(v any) error {
- defer cancel()
-
+ defer dataFree()
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
+
for _, sh := range shs {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
- Length: len(d),
+ Length: d.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Data: d,
})
}
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: d,
+ Message: d.Materialize(),
}
for _, binlog := range binlogs {
binlog.Log(ctx, cm)
@@ -1389,7 +1415,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError()
}
- if e := t.WriteStatus(stream, appStatus); e != nil {
+ if e := stream.WriteStatus(appStatus); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
if len(binlogs) != 0 {
@@ -1416,20 +1442,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
- opts := &transport.Options{Last: true}
+ opts := &transport.WriteOptions{Last: true}
// Server handler could have set new compressor by calling SetSendCompressor.
// In case it is set, we need to use it for compressing outbound message.
if stream.SendCompress() != sendCompressorName {
comp = encoding.GetCompressor(stream.SendCompress())
}
- if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
+ if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
if sts, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, sts); e != nil {
+ if e := stream.WriteStatus(sts); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
@@ -1469,9 +1495,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
binlog.Log(ctx, sm)
}
}
- if channelz.IsOn() {
- t.IncrMsgSent()
- }
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
@@ -1487,7 +1510,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
binlog.Log(ctx, st)
}
}
- return t.WriteStatus(stream, statusOK)
+ return stream.WriteStatus(statusOK)
}
// chainStreamServerInterceptors chains all stream server interceptors into one.
@@ -1526,7 +1549,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
}
}
-func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
}
@@ -1546,9 +1569,8 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
- t: t,
s: stream,
- p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+ p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1628,12 +1650,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- ss.dc = s.opts.dc
+ ss.decompressorV0 = s.opts.dc
} else if rc != "" && rc != encoding.Identity {
- ss.decomp = encoding.GetCompressor(rc)
- if ss.decomp == nil {
+ ss.decompressorV1 = encoding.GetCompressor(rc)
+ if ss.decompressorV1 == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
return st.Err()
}
}
@@ -1643,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
//
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
- ss.cp = s.opts.cp
+ ss.compressorV0 = s.opts.cp
ss.sendCompressorName = s.opts.cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
- ss.comp = encoding.GetCompressor(rc)
- if ss.comp != nil {
+ ss.compressorV1 = encoding.GetCompressor(rc)
+ if ss.compressorV1 != nil {
ss.sendCompressorName = rc
}
}
@@ -1659,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
}
}
- ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+ ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1702,7 +1724,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
binlog.Log(ctx, st)
}
}
- t.WriteStatus(ss.s, appStatus)
+ ss.s.WriteStatus(appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
}
@@ -1720,10 +1742,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
binlog.Log(ctx, st)
}
}
- return t.WriteStatus(ss.s, statusOK)
+ return ss.s.WriteStatus(statusOK)
}
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
ctx := stream.Context()
ctx = contextWithServer(ctx, s)
var ti *traceInfo
@@ -1753,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
ti.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+ if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
if ti != nil {
ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ti.tr.SetError()
@@ -1768,17 +1790,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
service := sm[:pos]
method := sm[pos+1:]
- md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
+ // FromIncomingContext is expensive: skip if there are no statsHandlers
+ if len(s.opts.statsHandlers) > 0 {
+ md, _ := metadata.FromIncomingContext(ctx)
+ for _, sh := range s.opts.statsHandlers {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ sh.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
+ }
}
// To have calls in stream callouts work. Will delete once all stats handler
// calls come from the gRPC layer.
@@ -1787,17 +1812,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
srv, knownService := s.services[service]
if knownService {
if md, ok := srv.methods[method]; ok {
- s.processUnaryRPC(ctx, t, stream, srv, md, ti)
+ s.processUnaryRPC(ctx, stream, srv, md, ti)
return
}
if sd, ok := srv.streams[method]; ok {
- s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
+ s.processStreamingRPC(ctx, stream, srv, sd, ti)
return
}
}
// Unknown service, or known server unknown method.
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
+ s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
return
}
var errDesc string
@@ -1810,7 +1835,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
ti.tr.LazyPrintf("%s", errDesc)
ti.tr.SetError()
}
- if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+ if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
if ti != nil {
ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ti.tr.SetError()
@@ -1910,7 +1935,7 @@ func (s *Server) stop(graceful bool) {
s.conns = nil
if s.opts.numServerWorkers > 0 {
- // Closing the channel (only once, via grpcsync.OnceFunc) after all the
+ // Closing the channel (only once, via sync.OnceFunc) after all the
// connections have been closed above ensures that there are no
// goroutines executing the callback passed to st.HandleStreams (where
// the channel is written to).
@@ -1963,12 +1988,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return s.opts.codec
}
if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
- codec := encoding.GetCodec(contentSubtype)
+ codec := getCodec(contentSubtype)
if codec == nil {
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
return codec
}
@@ -2085,7 +2110,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func SetSendCompressor(ctx context.Context, name string) error {
- stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
if !ok || stream == nil {
return fmt.Errorf("failed to fetch the stream from the given context")
}
@@ -2107,7 +2132,7 @@ func SetSendCompressor(ctx context.Context, name string) error {
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
- stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
if !ok || stream == nil {
return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
}
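The server.go hunks above move writes and status handling off the transport (`t.Write`, `t.WriteStatus`) onto `*transport.ServerStream`, and switch message payloads to reference-counted buffers that must be released exactly once. Below is a minimal, standalone sketch of the free-once guard used by `processUnaryRPC` (the `freed`/`dataFree` closure); `freeOnce` and the printed message are illustrative and not part of the vendored code:

```go
package main

import "fmt"

// freeOnce wraps a release function so that repeated calls release only once,
// mirroring the freed/dataFree guard added to processUnaryRPC above.
func freeOnce(release func()) func() {
	freed := false
	return func() {
		if !freed {
			release()
			freed = true
		}
	}
}

func main() {
	free := freeOnce(func() { fmt.Println("buffer released") })
	defer free() // safety net, analogous to `defer dataFree()`
	free()       // eager release on the hot path; prints once
	free()       // no-op: the underlying buffer is not double-freed
}
```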
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 9da8fc802..8d451e07c 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -26,6 +26,7 @@ import (
"time"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
@@ -163,9 +164,12 @@ type jsonSC struct {
}
func init() {
- internal.ParseServiceConfig = parseServiceConfig
+ internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+ return parseServiceConfig(js, defaultMaxCallAttempts)
+ }
}
-func parseServiceConfig(js string) *serviceconfig.ParseResult {
+
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
if len(js) == 0 {
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
}
@@ -183,12 +187,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
}
c := rsc.LoadBalancingConfig
if c == nil {
- name := PickFirstBalancerName
+ name := pickfirst.Name
if rsc.LoadBalancingPolicy != nil {
name = *rsc.LoadBalancingPolicy
}
if balancer.Get(name) == nil {
- name = PickFirstBalancerName
+ name = pickfirst.Name
}
cfg := []map[string]any{{name: struct{}{}}}
strCfg, err := json.Marshal(cfg)
@@ -218,7 +222,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
WaitForReady: m.WaitForReady,
Timeout: (*time.Duration)(m.Timeout),
}
- if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+ if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
@@ -264,38 +268,40 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
return &serviceconfig.ParseResult{Config: &sc}
}
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
+func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
+ return jrp.MaxAttempts > 1 &&
+ jrp.InitialBackoff > 0 &&
+ jrp.MaxBackoff > 0 &&
+ jrp.BackoffMultiplier > 0 &&
+ len(jrp.RetryableStatusCodes) > 0
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
if jrp == nil {
return nil, nil
}
- if jrp.MaxAttempts <= 1 ||
- jrp.InitialBackoff <= 0 ||
- jrp.MaxBackoff <= 0 ||
- jrp.BackoffMultiplier <= 0 ||
- len(jrp.RetryableStatusCodes) == 0 {
- logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
- return nil, nil
+ if !isValidRetryPolicy(jrp) {
+ return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp)
}
+ if jrp.MaxAttempts < maxAttempts {
+ maxAttempts = jrp.MaxAttempts
+ }
rp := &internalserviceconfig.RetryPolicy{
- MaxAttempts: jrp.MaxAttempts,
+ MaxAttempts: maxAttempts,
InitialBackoff: time.Duration(jrp.InitialBackoff),
MaxBackoff: time.Duration(jrp.MaxBackoff),
BackoffMultiplier: jrp.BackoffMultiplier,
RetryableStatusCodes: make(map[codes.Code]bool),
}
- if rp.MaxAttempts > 5 {
- // TODO(retry): Make the max maxAttempts configurable.
- rp.MaxAttempts = 5
- }
for _, code := range jrp.RetryableStatusCodes {
rp.RetryableStatusCodes[code] = true
}
return rp, nil
}
-func min(a, b *int) *int {
+func minPointers(a, b *int) *int {
if *a < *b {
return a
}
@@ -307,7 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
return &defaultVal
}
if mcMax != nil && doptMax != nil {
- return min(mcMax, doptMax)
+ return minPointers(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
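In the service_config.go hunks above, retry policies that were previously ignored with a warning are now rejected with an error, and the hard-coded cap of 5 attempts is replaced by a `maxAttempts` argument (wired to `defaultMaxCallAttempts` in `init`). A rough sketch of the validation and clamping logic, using a simplified local struct rather than gRPC's internal JSON types:

```go
package main

import (
	"fmt"
	"time"
)

// retryPolicy is a simplified stand-in for the JSON retry policy fields
// checked by isValidRetryPolicy in the hunk above.
type retryPolicy struct {
	MaxAttempts          int
	InitialBackoff       time.Duration
	MaxBackoff           time.Duration
	BackoffMultiplier    float64
	RetryableStatusCodes []string
}

func valid(p retryPolicy) bool {
	return p.MaxAttempts > 1 &&
		p.InitialBackoff > 0 &&
		p.MaxBackoff > 0 &&
		p.BackoffMultiplier > 0 &&
		len(p.RetryableStatusCodes) > 0
}

func main() {
	p := retryPolicy{
		MaxAttempts:          10,
		InitialBackoff:       100 * time.Millisecond,
		MaxBackoff:           time.Second,
		BackoffMultiplier:    2,
		RetryableStatusCodes: []string{"UNAVAILABLE"},
	}
	// The channel-level cap (defaultMaxCallAttempts upstream) now clamps the
	// per-method value instead of a hard-coded 5.
	maxAttempts := 5
	if p.MaxAttempts < maxAttempts {
		maxAttempts = p.MaxAttempts
	}
	fmt.Println(valid(p), maxAttempts) // true 5
}
```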
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8..000000000
--- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
- // Get returns a buffer with specified length from the pool.
- //
- // The returned byte slice may be not zero initialized.
- Get(length int) []byte
-
- // Put returns a buffer to the pool.
- Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
- return &simpleSharedBufferPool{
- pools: [poolArraySize]simpleSharedBufferChildPool{
- newBytesPool(level0PoolMaxSize),
- newBytesPool(level1PoolMaxSize),
- newBytesPool(level2PoolMaxSize),
- newBytesPool(level3PoolMaxSize),
- newBytesPool(level4PoolMaxSize),
- newBytesPool(0),
- },
- }
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
- pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
- return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
- p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
- switch {
- case size <= level0PoolMaxSize:
- return level0PoolIdx
- case size <= level1PoolMaxSize:
- return level1PoolIdx
- case size <= level2PoolMaxSize:
- return level2PoolIdx
- case size <= level3PoolMaxSize:
- return level3PoolIdx
- case size <= level4PoolMaxSize:
- return level4PoolIdx
- default:
- return levelMaxPoolIdx
- }
-}
-
-const (
- level0PoolMaxSize = 16 // 16 B
- level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
- level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
- level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
- level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
- level0PoolIdx = iota
- level1PoolIdx
- level2PoolIdx
- level3PoolIdx
- level4PoolIdx
- levelMaxPoolIdx
- poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
- Get(size int) []byte
- Put(any)
-}
-
-type bufferPool struct {
- sync.Pool
-
- defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
- bs := p.Pool.Get().(*[]byte)
-
- if cap(*bs) < size {
- p.Pool.Put(bs)
-
- return make([]byte, size)
- }
-
- return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
- return &bufferPool{
- Pool: sync.Pool{
- New: func() any {
- bs := make([]byte, size)
- return &bs
- },
- },
- defaultSize: size,
- }
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
- return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
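The deleted file implemented the experimental `SharedBufferPool` as a set of size-bucketed `sync.Pool`s; callers now plumb a `bufferPool` through `parser` instead. For readers comparing the two approaches, here is a compact sketch of the bucketing rule from the removed code, with the bucket sizes copied from the deleted constants:

```go
package main

import "fmt"

// poolIdx maps a requested size to one of the fixed buckets used by the
// deleted simpleSharedBufferPool (16 B, 256 B, 4 KB, 64 KB, 1 MB, overflow).
func poolIdx(size int) int {
	bounds := []int{16, 256, 4 << 10, 64 << 10, 1 << 20}
	for i, b := range bounds {
		if size <= b {
			return i
		}
	}
	return len(bounds) // overflow bucket
}

func main() {
	fmt.Println(poolIdx(100), poolIdx(5000), poolIdx(2<<20)) // 1 3 5
}
```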
diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go
new file mode 100644
index 000000000..641c8e979
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/metrics.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import "maps"
+
+// MetricSet is a set of metrics to record. Once created, MetricSet is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetricSet instead.
+type MetricSet struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[string]bool
+}
+
+// NewMetricSet returns a MetricSet containing metricNames.
+func NewMetricSet(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for _, metric := range metricNames {
+ newMetrics[metric] = true
+ }
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *MetricSet) Metrics() map[string]bool {
+ return m.metrics
+}
+
+// Add adds the metricNames to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *MetricSet) Add(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metricNames {
+ newMetrics[metric] = true
+ }
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
+ newMetrics := make(map[string]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Remove removes the metricNames from the metrics set and returns a new copy
+// with the metrics removed.
+func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metricNames {
+ delete(newMetrics, metric)
+ }
+ return &MetricSet{metrics: newMetrics}
+}
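The new `stats.MetricSet` type added above is immutable; `Add`, `Remove`, and `Join` each return a fresh copy. A small usage sketch against the added API (the metric names are illustrative placeholders):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Each operation returns a new immutable set, per the doc comments above.
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	extra := stats.NewMetricSet("grpc.client.call.duration")

	joined := base.Join(extra).Add("grpc.client.attempt.sent_total_compressed_message_size")
	trimmed := joined.Remove("grpc.client.call.duration")

	fmt.Println(len(joined.Metrics()), len(trimmed.Metrics())) // 3 2
}
```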
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd651..6f20d2d54 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
@@ -266,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client }
func (s *ConnEnd) isConnStats() {}
-type incomingTagsKey struct{}
-type outgoingTagsKey struct{}
-
// SetTags attaches stats tagging data to the context, which will be sent in
// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
// SetTags will overwrite the values from earlier calls.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
func SetTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTagsKey{}, b)
+ return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
}
// Tags returns the tags from the context for the inbound RPC.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
func Tags(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTagsKey{}).([]byte)
- return b
-}
-
-// SetIncomingTags attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).
-//
-// This is intended for gRPC-internal use ONLY.
-func SetIncomingTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTagsKey{}, b)
-}
-
-// OutgoingTags returns the tags from the context for the outbound RPC.
-//
-// This is intended for gRPC-internal use ONLY.
-func OutgoingTags(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
- return b
+ traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
+ if len(traceValues) == 0 {
+ return nil
+ }
+ return []byte(traceValues[len(traceValues)-1])
}
-type incomingTraceKey struct{}
-type outgoingTraceKey struct{}
-
// SetTrace attaches stats tagging data to the context, which will be sent in
// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
// SetTrace will overwrite the values from earlier calls.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
func SetTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTraceKey{}, b)
+ return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
}
// Trace returns the trace from the context for the inbound RPC.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
func Trace(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTraceKey{}).([]byte)
- return b
-}
-
-// SetIncomingTrace attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs). It is intended for
-// gRPC-internal use.
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTraceKey{}, b)
-}
-
-// OutgoingTrace returns the trace from the context for the outbound RPC. It is
-// intended for gRPC-internal use.
-func OutgoingTrace(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
- return b
+ traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
+ if len(traceValues) == 0 {
+ return nil
+ }
+ return []byte(traceValues[len(traceValues)-1])
}
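With the stats.go changes above, `SetTags`/`SetTrace` become deprecated shims over outgoing metadata rather than private context keys, so the tag bytes are visible as the `grpc-tags-bin` header. A quick sketch of that behavior, assuming the vendored gRPC in this PR is the version being imported:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/stats"
)

func main() {
	// SetTags is now a thin wrapper over AppendToOutgoingContext, per the hunk above.
	ctx := stats.SetTags(context.Background(), []byte{0x01, 0x02})

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Printf("%q\n", md.Get("grpc-tags-bin")) // ["\x01\x02"]
}
```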
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b54563e81..12163150b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,6 +23,7 @@ import (
"errors"
"io"
"math"
+ rand "math/rand/v2"
"strconv"
"sync"
"time"
@@ -34,13 +35,13 @@ import (
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -112,7 +113,9 @@ type ClientStream interface {
// SendMsg is generally called by generated code. On error, SendMsg aborts
// the stream. If the error was generated by the client, the status is
// returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg.
+ // the stream may be discovered using RecvMsg. For unary or server-streaming
+ // RPCs (StreamDesc.ClientStreams is false), a nil error is returned
+ // unconditionally.
//
// SendMsg blocks until:
// - There is sufficient flow control to schedule m with the transport, or
@@ -215,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
var mc serviceconfig.MethodConfig
var onCommit func()
- var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+ newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
}
@@ -255,9 +258,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
- c := defaultCallInfo()
+ callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
- c.failFast = !*mc.WaitForReady
+ callInfo.failFast = !*mc.WaitForReady
}
// Possible context leak:
@@ -278,20 +281,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}()
for _, o := range opts {
- if err := o.before(c); err != nil {
+ if err := o.before(callInfo); err != nil {
return nil, toRPCErr(err)
}
}
- c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
- c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(c); err != nil {
+ callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(callInfo); err != nil {
return nil, err
}
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
- ContentSubtype: c.contentSubtype,
+ ContentSubtype: callInfo.contentSubtype,
DoneFunc: doneFunc,
}
@@ -299,22 +302,22 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
// set. In that case, also find the compressor from the encoding package.
// Otherwise, use the compressor configured by the WithCompressor DialOption,
// if set.
- var cp Compressor
- var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ var compressorV0 Compressor
+ var compressorV1 encoding.Compressor
+ if ct := callInfo.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
- comp = encoding.GetCompressor(ct)
- if comp == nil {
+ compressorV1 = encoding.GetCompressor(ct)
+ if compressorV1 == nil {
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- cp = cc.dopts.cp
+ } else if cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = cc.dopts.compressorV0.Type()
+ compressorV0 = cc.dopts.compressorV0
}
- if c.creds != nil {
- callHdr.Creds = c.creds
+ if callInfo.creds != nil {
+ callHdr.Creds = callInfo.creds
}
cs := &clientStream{
@@ -322,12 +325,12 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
ctx: ctx,
methodConfig: &mc,
opts: opts,
- callInfo: c,
+ callInfo: callInfo,
cc: cc,
desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
+ codec: callInfo.codec,
+ compressorV0: compressorV0,
+ compressorV1: compressorV1,
cancel: cancel,
firstAttempt: true,
onCommit: onCommit,
@@ -359,7 +362,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs.attempt = a
return nil
}
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
@@ -409,7 +412,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
return nil, ErrClientConnClosing
}
- ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+ ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
shs := cs.cc.dopts.copts.StatsHandlers
@@ -451,12 +454,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
}
return &csAttempt{
- ctx: ctx,
- beginTime: beginTime,
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandlers: shs,
- trInfo: trInfo,
+ ctx: ctx,
+ beginTime: beginTime,
+ cs: cs,
+ decompressorV0: cs.cc.dopts.dc,
+ statsHandlers: shs,
+ trInfo: trInfo,
}, nil
}
@@ -464,7 +467,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs
var err error
- a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@@ -473,7 +476,7 @@ func (a *csAttempt) getTransport() error {
return err
}
if a.trInfo != nil {
- a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+ a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
return nil
}
@@ -500,7 +503,7 @@ func (a *csAttempt) newStream() error {
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}
- s, err := a.t.NewStream(a.ctx, cs.callHdr)
+ s, err := a.transport.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)
if !ok {
@@ -515,9 +518,9 @@ func (a *csAttempt) newStream() error {
// Unwrap and convert error.
return toRPCErr(nse.Err)
}
- a.s = s
+ a.transportStream = s
a.ctx = s.Context()
- a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+ a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -529,9 +532,9 @@ type clientStream struct {
cc *ClientConn
desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
+ codec baseCodec
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
cancel context.CancelFunc // cancels all attempts
@@ -566,26 +569,31 @@ type clientStream struct {
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- onCommit func()
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
+ committed bool // active attempt committed for retry?
+ onCommit func()
+ replayBuffer []replayOp // operations to replay on retry
+ replayBufferSize int // current size of replayBuffer
+}
+
+type replayOp struct {
+ op func(a *csAttempt) error
+ cleanup func()
}
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
- ctx context.Context
- cs *clientStream
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- pickResult balancer.PickResult
-
- finished bool
- dc Decompressor
- decomp encoding.Compressor
- decompSet bool
+ ctx context.Context
+ cs *clientStream
+ transport transport.ClientTransport
+ transportStream *transport.ClientStream
+ parser *parser
+ pickResult balancer.PickResult
+
+ finished bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ decompressorSet bool
mu sync.Mutex // guards trInfo.tr
// trInfo may be nil (if EnableTracing is false).
@@ -607,7 +615,12 @@ func (cs *clientStream) commitAttemptLocked() {
cs.onCommit()
}
cs.committed = true
- cs.buffer = nil
+ for _, op := range cs.replayBuffer {
+ if op.cleanup != nil {
+ op.cleanup()
+ }
+ }
+ cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@@ -626,14 +639,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
// RPC is finished or committed or was dropped by the picker; cannot retry.
return false, err
}
- if a.s == nil && a.allowTransparentRetry {
+ if a.transportStream == nil && a.allowTransparentRetry {
return true, nil
}
// Wait for the trailers.
unprocessed := false
- if a.s != nil {
- <-a.s.Done()
- unprocessed = a.s.Unprocessed()
+ if a.transportStream != nil {
+ <-a.transportStream.Done()
+ unprocessed = a.transportStream.Unprocessed()
}
if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
@@ -645,14 +658,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
pushback := 0
hasPushback := false
- if a.s != nil {
- if !a.s.TrailersOnly() {
+ if a.transportStream != nil {
+ if !a.transportStream.TrailersOnly() {
return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
// before considering this a failure for throttling.
- sps := a.s.Trailer()["grpc-retry-pushback-ms"]
+ sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"]
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
@@ -669,8 +682,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
}
var code codes.Code
- if a.s != nil {
- code = a.s.Status().Code()
+ if a.transportStream != nil {
+ code = a.transportStream.Status().Code()
} else {
code = status.Code(err)
}
@@ -695,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
cs.numRetriesSincePushback = 0
} else {
fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := float64(rp.InitialBackoff) * fact
- if max := float64(rp.MaxBackoff); cur > max {
- cur = max
- }
- dur = time.Duration(grpcrand.Int63n(int64(cur)))
+ cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
+ // Apply jitter by multiplying with a random factor between 0.8 and 1.2
+ cur *= 0.8 + 0.4*rand.Float64()
+ dur = time.Duration(int64(cur))
cs.numRetriesSincePushback++
}
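The hunk above switches retry backoff from the removed `grpcrand` helper (which sampled uniformly below the capped backoff) to `math/rand/v2` with a jitter factor between 0.8 and 1.2 applied to the capped value. A standalone sketch of that computation; the function name and parameters are illustrative:

```go
package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// backoff reproduces the calculation in the hunk above: exponential growth
// capped at maxBackoff, then multiplied by a random factor in [0.8, 1.2).
func backoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(backoff(100*time.Millisecond, time.Second, 2, i))
	}
}
```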
@@ -732,7 +744,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
// the stream is canceled.
return err
}
- // Note that the first op in the replay buffer always sets cs.attempt
+ // Note that the first op in replayBuffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
@@ -744,8 +756,8 @@ func (cs *clientStream) Context() context.Context {
cs.commitAttempt()
// No need to lock before using attempt, since we know it is committed and
// cannot change.
- if cs.attempt.s != nil {
- return cs.attempt.s.Context()
+ if cs.attempt.transportStream != nil {
+ return cs.attempt.transportStream.Context()
}
return cs.ctx
}
@@ -761,7 +773,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
// already be status errors.
return toRPCErr(op(cs.attempt))
}
- if len(cs.buffer) == 0 {
+ if len(cs.replayBuffer) == 0 {
// For the first op, which controls creation of the stream and
// assigns cs.attempt, we need to create a new attempt inline
// before executing the first op. On subsequent ops, the attempt
@@ -782,9 +794,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
continue
}
if err == io.EOF {
- <-a.s.Done()
+ <-a.transportStream.Done()
}
- if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) {
onSuccess()
cs.mu.Unlock()
return err
@@ -800,7 +812,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
err := cs.withRetry(func(a *csAttempt) error {
var err error
- m, err = a.s.Header()
+ m, err = a.transportStream.Header()
return toRPCErr(err)
}, cs.commitAttemptLocked)
@@ -844,32 +856,33 @@ func (cs *clientStream) Trailer() metadata.MD {
// directions -- it will prevent races and should not meaningfully impact
// performance.
cs.commitAttempt()
- if cs.attempt.s == nil {
+ if cs.attempt.transportStream == nil {
return nil
}
- return cs.attempt.s.Trailer()
+ return cs.attempt.transportStream.Trailer()
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
- for _, f := range cs.buffer {
- if err := f(attempt); err != nil {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we still will buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
+ cleanup()
return
}
- cs.buffer = append(cs.buffer, op)
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
func (cs *clientStream) SendMsg(m any) (err error) {
@@ -891,23 +904,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was performed, in which case it is a
+ // different set of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if payloadLen > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
+
+ // always take an extra ref in case data == payload (i.e. when the data isn't
+ // compressed). The original ref will always be freed by the deferred free above.
+ payload.Ref()
op := func(a *csAttempt) error {
- return a.sendMsg(m, hdr, payload, data)
+ return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+ }
+
+ // onSuccess is invoked when the op is captured for a subsequent retry. If the
+ // stream was established by a previous message and therefore retries are
+ // disabled, onSuccess will not be invoked, and the extra payload reference can
+ // be freed immediately.
+ onSuccessCalled := false
+ err = cs.withRetry(op, func() {
+ cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+ onSuccessCalled = true
+ })
+ if !onSuccessCalled {
+ payload.Free()
}
- err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, cm)
@@ -924,6 +964,7 @@ func (cs *clientStream) RecvMsg(m any) error {
var recvInfo *payloadInfo
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
+ defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
@@ -931,7 +972,7 @@ func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
- Message: recvInfo.uncompressedBytes,
+ Message: recvInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, sm)
@@ -951,14 +992,14 @@ func (cs *clientStream) CloseSend() error {
}
cs.sentLast = true
op := func(a *csAttempt) error {
- a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+ a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
// RecvMsg. This also matches historical behavior.
return nil
}
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
@@ -989,7 +1030,7 @@ func (cs *clientStream) finish(err error) {
if cs.attempt != nil {
cs.attempt.finish(err)
// after functions all rely upon having a stream.
- if cs.attempt.s != nil {
+ if cs.attempt.transportStream != nil {
for _, o := range cs.opts {
o.after(cs.callInfo, cs.attempt)
}
@@ -1034,7 +1075,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
-func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -1043,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
a.mu.Unlock()
}
- if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+ if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
if !cs.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1052,11 +1093,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
return io.EOF
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
- }
- if channelz.IsOn() {
- a.t.IncrMsgSent()
+ if len(a.statsHandlers) != 0 {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+ }
}
return nil
}
@@ -1065,28 +1105,28 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if !a.decompSet {
+ if !a.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.dc == nil || a.dc.Type() != ct {
+ if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.decompressorV0 == nil || a.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- a.dc = nil
- a.decomp = encoding.GetCompressor(ct)
+ a.decompressorV0 = nil
+ a.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- a.dc = nil
+ a.decompressorV0 = nil
}
// Only initialize this state once per stream.
- a.decompSet = true
+ a.decompressorSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
- if err != nil {
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := a.s.Status().Err(); statusErr != nil {
+ if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
return io.EOF // indicates successful end of stream.
@@ -1103,33 +1143,26 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Length: len(payInfo.uncompressedBytes),
+ Length: payInfo.uncompressedBytes.Len(),
})
}
- if channelz.IsOn() {
- a.t.IncrMsgRecv()
- }
if cs.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
return nil
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (a *csAttempt) finish(err error) {
@@ -1144,20 +1177,20 @@ func (a *csAttempt) finish(err error) {
err = nil
}
var tr metadata.MD
- if a.s != nil {
- a.t.CloseStream(a.s, err)
- tr = a.s.Trailer()
+ if a.transportStream != nil {
+ a.transportStream.Close(err)
+ tr = a.transportStream.Trailer()
}
if a.pickResult.Done != nil {
br := false
- if a.s != nil {
- br = a.s.BytesReceived()
+ if a.transportStream != nil {
+ br = a.transportStream.BytesReceived()
}
a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
- BytesSent: a.s != nil,
+ BytesSent: a.transportStream != nil,
BytesReceived: br,
ServerLoad: balancerload.Parse(tr),
})
@@ -1185,12 +1218,12 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@@ -1239,7 +1272,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
// if set.
var cp Compressor
var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ if ct := c.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
comp = encoding.GetCompressor(ct)
@@ -1247,9 +1280,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if ac.cc.dopts.cp != nil {
- callHdr.SendCompress = ac.cc.dopts.cp.Type()
- cp = ac.cc.dopts.cp
+ } else if ac.cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
+ cp = ac.cc.dopts.compressorV0
}
if c.creds != nil {
callHdr.Creds = c.creds
@@ -1257,26 +1290,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{
- callHdr: callHdr,
- ac: ac,
- ctx: ctx,
- cancel: cancel,
- opts: opts,
- callInfo: c,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- t: t,
- }
-
- s, err := as.t.NewStream(as.ctx, as.callHdr)
+ callHdr: callHdr,
+ ac: ac,
+ ctx: ctx,
+ cancel: cancel,
+ opts: opts,
+ callInfo: c,
+ desc: desc,
+ codec: c.codec,
+ sendCompressorV0: cp,
+ sendCompressorV1: comp,
+ transport: t,
+ }
+
+ s, err := as.transport.NewStream(as.ctx, as.callHdr)
if err != nil {
err = toRPCErr(err)
return nil, err
}
- as.s = s
- as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+ as.transportStream = s
+ as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1302,29 +1335,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
}
type addrConnStream struct {
- s *transport.Stream
- ac *addrConn
- callHdr *transport.CallHdr
- cancel context.CancelFunc
- opts []CallOption
- callInfo *callInfo
- t transport.ClientTransport
- ctx context.Context
- sentLast bool
- desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
- decompSet bool
- dc Decompressor
- decomp encoding.Compressor
- p *parser
- mu sync.Mutex
- finished bool
+ transportStream *transport.ClientStream
+ ac *addrConn
+ callHdr *transport.CallHdr
+ cancel context.CancelFunc
+ opts []CallOption
+ callInfo *callInfo
+ transport transport.ClientTransport
+ ctx context.Context
+ sentLast bool
+ desc *StreamDesc
+ codec baseCodec
+ sendCompressorV0 Compressor
+ sendCompressorV1 encoding.Compressor
+ decompressorSet bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ parser *parser
+
+ // mu guards finished and is held for the entire finish method.
+ mu sync.Mutex
+ finished bool
}
func (as *addrConnStream) Header() (metadata.MD, error) {
- m, err := as.s.Header()
+ m, err := as.transportStream.Header()
if err != nil {
as.finish(toRPCErr(err))
}
@@ -1332,7 +1367,7 @@ func (as *addrConnStream) Header() (metadata.MD, error) {
}
func (as *addrConnStream) Trailer() metadata.MD {
- return as.s.Trailer()
+ return as.transportStream.Trailer()
}
func (as *addrConnStream) CloseSend() error {
@@ -1342,7 +1377,7 @@ func (as *addrConnStream) CloseSend() error {
}
as.sentLast = true
- as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+ as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
@@ -1351,7 +1386,7 @@ func (as *addrConnStream) CloseSend() error {
}
func (as *addrConnStream) Context() context.Context {
- return as.s.Context()
+ return as.transportStream.Context()
}
func (as *addrConnStream) SendMsg(m any) (err error) {
@@ -1373,17 +1408,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was performed, in which case it is a
+ // different set of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1393,9 +1437,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
return io.EOF
}
- if channelz.IsOn() {
- as.t.IncrMsgSent()
- }
return nil
}
@@ -1407,26 +1448,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
}
}()
- if !as.decompSet {
+ if !as.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.dc == nil || as.dc.Type() != ct {
+ if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.decompressorV0 == nil || as.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- as.dc = nil
- as.decomp = encoding.GetCompressor(ct)
+ as.decompressorV0 = nil
+ as.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- as.dc = nil
+ as.decompressorV0 = nil
}
// Only initialize this state once per stream.
- as.decompSet = true
+ as.decompressorSet = true
}
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := as.s.Status().Err(); statusErr != nil {
+ if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
return io.EOF // indicates successful end of stream.
@@ -1434,9 +1474,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
return toRPCErr(err)
}
- if channelz.IsOn() {
- as.t.IncrMsgRecv()
- }
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
return nil
@@ -1444,14 +1481,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
- return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (as *addrConnStream) finish(err error) {
@@ -1465,8 +1500,8 @@ func (as *addrConnStream) finish(err error) {
// Ending a stream with EOF indicates a success.
err = nil
}
- if as.s != nil {
- as.t.CloseStream(as.s, err)
+ if as.transportStream != nil {
+ as.transportStream.Close(err)
}
if err != nil {
@@ -1533,15 +1568,14 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
ctx context.Context
- t transport.ServerTransport
- s *transport.Stream
+ s *transport.ServerStream
p *parser
codec baseCodec
- cp Compressor
- dc Decompressor
- comp encoding.Compressor
- decomp encoding.Compressor
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
sendCompressorName string
@@ -1584,7 +1618,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
return status.Error(codes.Internal, err.Error())
}
- err = ss.t.WriteHeader(ss.s, md)
+ err = ss.s.SendHeader(md)
if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
sh := &binarylog.ServerHeader{
@@ -1624,7 +1658,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if err != nil && err != io.EOF {
st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
// Non-user specified status was sent out. This should be an error
// case (as a server side Cancel maybe).
//
@@ -1632,31 +1666,41 @@ func (ss *serverStream) SendMsg(m any) (err error) {
// status from the service handler, we will log that error instead.
// This behavior is similar to an interceptor.
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgSent()
- }
}()
// Server handler could have set new compressor by calling SetSendCompressor.
// In case it is set, we need to use it for compressing outbound message.
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
- ss.comp = encoding.GetCompressor(sendCompressorsName)
+ ss.compressorV1 = encoding.GetCompressor(sendCompressorsName)
ss.sendCompressorName = sendCompressorsName
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if payloadLen > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
- if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+ if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
return toRPCErr(err)
}
+
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
@@ -1669,7 +1713,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
}
sm := &binarylog.ServerMessage{
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, sm)
@@ -1677,7 +1721,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
}
return nil
@@ -1699,7 +1743,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if err != nil && err != io.EOF {
st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
// Non-user specified status was sent out. This should be an error
// case (as a server side Cancel maybe).
//
@@ -1707,15 +1751,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
// status from the service handler, we will log that error instead.
// This behavior is similar to an interceptor.
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgRecv()
- }
}()
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1726,18 +1768,16 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return err
}
if err == io.ErrUnexpectedEOF {
- err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- Length: len(payInfo.uncompressedBytes),
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
@@ -1745,7 +1785,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes,
+ Message: payInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, cm)
@@ -1760,23 +1800,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned boolean indicates whether
+// compression was made and therefore whether the payload needs to be freed in
+// addition to the returned data. Freeing the payload if the returned boolean is
+// false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+ return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, nil, 0, err
}
- compData, err := compress(data, cp, comp)
+ compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
- return nil, nil, nil, err
+ data.Free()
+ return nil, nil, nil, 0, err
}
- hdr, payload = msgHeader(data, compData)
- return hdr, payload, data, nil
+ hdr, payload = msgHeader(data, compData, pf)
+ return hdr, data, payload, pf, nil
}
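The prepareMsg comment above encodes a buffer-ownership rule that every caller in this file follows: data is always freed, while payload is freed only when compression produced a distinct buffer. A minimal sketch of that rule with hypothetical types (not the grpc internals):

package ownershipsketch

// pooledBuffer is a stand-in for a pooled message buffer such as the
// mem.BufferSlice values used above.
type pooledBuffer struct{ b []byte }

func (p *pooledBuffer) Free() { p.b = nil } // pretend this returns memory to a pool

// sendPrepared mirrors the deferred Free pattern in SendMsg: data is always
// caller-owned; payload aliases data unless compressed is true, in which case
// it is a separate buffer that must also be released.
func sendPrepared(data, payload *pooledBuffer, compressed bool) {
	defer data.Free()
	if compressed {
		defer payload.Free()
	}
	// ... write the header and payload to the transport here ...
}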
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c..0037fee0b 100644
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type ServerStreamingServer[Res any] interface {
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using CloseAndRecv().
Send(*Req) error
+
+ // CloseAndRecv closes the request stream and waits for the server's
+ // response. This method must be called once and only once after sending
+ // all request messages. Any error returned is implemented by the status
+ // package.
CloseAndRecv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
type ClientStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseAndRecv on its
+ // ClientStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // SendAndClose sends a single response message to the client and closes
+ // the stream. This method must be called once and only once after all
+ // request messages have been processed. Recv should not be called after
+ // calling SendAndClose.
SendAndClose(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using Recv().
Send(*Req) error
+
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, Trailer, and
+ // CloseSend functionality. No other methods in the ClientStream should be
+ // called directly.
ClientStream
}
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type BidiStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseSend on its
+ // BidiStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
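The method contracts documented above are enough to write small generic helpers against the new stream interfaces. The two functions below are illustrative only (hypothetical names, not part of the gRPC API) and rely solely on the documented behavior of Recv, Send, and CloseAndRecv:

package streamutil

import (
	"io"

	"google.golang.org/grpc"
)

// DrainServerStream reads a server-streaming response until io.EOF, which the
// interface comments define as a clean end of stream with an OK status.
func DrainServerStream[Res any](s grpc.ServerStreamingClient[Res]) ([]*Res, error) {
	var out []*Res
	for {
		msg, err := s.Recv()
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err // already compatible with the status package
		}
		out = append(out, msg)
	}
}

// SendAllAndClose pushes every request on a client-streaming RPC and then
// calls CloseAndRecv exactly once, as required above.
func SendAllAndClose[Req, Res any](s grpc.ClientStreamingClient[Req, Res], reqs []*Req) (*Res, error) {
	for _, r := range reqs {
		if err := s.Send(r); err != nil {
			if err != io.EOF {
				return nil, err // client-side error, returned directly
			}
			break // stream aborted; CloseAndRecv surfaces the real status
		}
	}
	return s.CloseAndRecv()
}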
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index a0b782890..783c41f78 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.64.1"
+const Version = "1.71.0"
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index bb2966e3b..737d6876d 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro
fd = fieldDescs.ByTextName(name)
}
}
- if flags.ProtoLegacy {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
if fd == nil {
// Field is unknown.
@@ -351,7 +346,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
- return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 29846df22..0e72d8537 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
}
v := m.Get(fd)
- isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
- isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
- if isProto2Scalar || isSingularMessage {
+ if fd.HasPresence() {
if m.skipNull {
continue
}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 4b177c820..e9fe10394 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
switch tok.Kind() {
case json.ObjectClose:
if !found {
- return d.newError(tok.Pos(), `missing "value" field`)
+ // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+ // for compatibility with other proto runtimes that have interpreted the spec differently.
+ if m.Descriptor().FullName() != genid.Empty_message_fullname {
+ return d.newError(tok.Pos(), `missing "value" field`)
+ }
}
return nil
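The practical effect of the tolerance above: a google.protobuf.Any that wraps google.protobuf.Empty may omit the "value" key. A small assumed example (requires the Empty type to be linked in, hence the blank emptypb import):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/emptypb" // registers google.protobuf.Empty
)

func main() {
	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)
	a := &anypb.Any{}
	// Earlier releases rejected this input with `missing "value" field`; with
	// the change above it unmarshals successfully.
	if err := protojson.Unmarshal(in, a); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("type url:", a.GetTypeUrl())
}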
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 24bc98ac4..b53805056 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
- if flags.ProtoLegacy {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
// Handle unknown fields.
if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c8..024ffebd3 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
//
// Each variable is populated with a nil pointer to the options struct.
var (
- File pref.ProtoMessage
- Enum pref.ProtoMessage
- EnumValue pref.ProtoMessage
- Message pref.ProtoMessage
- Field pref.ProtoMessage
- Oneof pref.ProtoMessage
- ExtensionRange pref.ProtoMessage
- Service pref.ProtoMessage
- Method pref.ProtoMessage
+ File protoreflect.ProtoMessage
+ Enum protoreflect.ProtoMessage
+ EnumValue protoreflect.ProtoMessage
+ Message protoreflect.ProtoMessage
+ Field protoreflect.ProtoMessage
+ Oneof protoreflect.ProtoMessage
+ ExtensionRange protoreflect.ProtoMessage
+ Service protoreflect.ProtoMessage
+ Method protoreflect.ProtoMessage
)
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index ff6a38360..323829da1 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 029a6a12d..bf1aba0e8 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,9 +5,14 @@
// Package editionssupport defines constants for editions that are supported.
package editionssupport
-import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+import "google.golang.org/protobuf/types/descriptorpb"
const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
Maximum = descriptorpb.Edition_EDITION_2023
+
+ // MaximumKnown is the maximum edition that is known to Go Protobuf, but not
+ // declared as supported. In other words: end users cannot use it, but
+ // testprotos inside Go Protobuf can.
+ MaximumKnown = descriptorpb.Edition_EDITION_2024
)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 7e87c7604..669133d04 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0))
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
@@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
}
case s == "packed":
f.L1.EditionFeatures.IsPacked = true
- case strings.HasPrefix(s, "weak="):
- f.L1.IsWeak = true
- f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
// the exact same semantics from the previous generator.
tag = append(tag, "json="+jsonName)
}
- if fd.IsWeak() {
- tag = append(tag, "weak="+string(fd.Message().FullName()))
- }
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
deleted file mode 100644
index fbcd34920..000000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-// +build !go1.13
-
-package errors
-
-import "reflect"
-
-// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
-func Is(err, target error) bool {
- if target == nil {
- return err == target
- }
-
- isComparable := reflect.TypeOf(target).Comparable()
- for {
- if isComparable && err == target {
- return true
- }
- if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
- return true
- }
- if err = unwrap(err); err == nil {
- return false
- }
- }
-}
-
-func unwrap(err error) error {
- u, ok := err.(interface {
- Unwrap() error
- })
- if !ok {
- return nil
- }
- return u.Unwrap()
-}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
deleted file mode 100644
index 5e72f1cde..000000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-package errors
-
-import "errors"
-
-// Is is errors.Is.
-func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40b..688aabe43 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -19,7 +19,6 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
@@ -32,6 +31,7 @@ const (
EditionProto2 Edition = 998
EditionProto3 Edition = 999
Edition2023 Edition = 1000
+ Edition2024 Edition = 1001
EditionUnsupported Edition = 100000
)
@@ -77,31 +77,48 @@ type (
Locations SourceLocations
}
+ // EditionFeatures is a frequently-instantiated struct, so please take care
+ // to minimize padding when adding new fields to this struct (add them in
+ // the right place/order).
EditionFeatures struct {
+ // StripEnumPrefix determines if the plugin generates enum value
+ // constants as-is, with their prefix stripped, or both variants.
+ StripEnumPrefix int
+
// IsFieldPresence is true if field_presence is EXPLICIT
// https://protobuf.dev/editions/features/#field_presence
IsFieldPresence bool
+
// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
// https://protobuf.dev/editions/features/#field_presence
IsLegacyRequired bool
+
// IsOpenEnum is true if enum_type is OPEN
// https://protobuf.dev/editions/features/#enum_type
IsOpenEnum bool
+
// IsPacked is true if repeated_field_encoding is PACKED
// https://protobuf.dev/editions/features/#repeated_field_encoding
IsPacked bool
+
// IsUTF8Validated is true if utf_validation is VERIFY
// https://protobuf.dev/editions/features/#utf8_validation
IsUTF8Validated bool
+
// IsDelimitedEncoded is true if message_encoding is DELIMITED
// https://protobuf.dev/editions/features/#message_encoding
IsDelimitedEncoded bool
+
// IsJSONCompliant is true if json_format is ALLOW
// https://protobuf.dev/editions/features/#json_format
IsJSONCompliant bool
+
// GenerateLegacyUnmarshalJSON determines if the plugin generates the
// UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool
+ // APILevel controls which API (Open, Hybrid or Opaque) should be used
+ // for generated code (.pb.go files).
+ APILevel int
}
)
@@ -257,7 +274,7 @@ type (
Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsWeak bool // promoted from google.protobuf.FieldOptions
+ IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -350,7 +367,8 @@ func (fd *Field) IsPacked() bool {
return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
-func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsWeak() bool { return false }
+func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -376,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor {
return fd.L1.Enum
}
func (fd *Field) Message() protoreflect.MessageDescriptor {
- if fd.L1.IsWeak {
- if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
- return d.(protoreflect.MessageDescriptor)
- }
- }
return fd.L1.Message
}
func (fd *Field) IsMapEntry() bool {
@@ -425,6 +438,7 @@ type (
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
+ IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
@@ -465,6 +479,7 @@ func (xd *Extension) IsPacked() bool {
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b0..d2f549497 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ xd.L1.IsLazy = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8d..d4c94458b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -32,11 +32,6 @@ func (file *File) resolveMessages() {
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
- // Weak fields are resolved upon actual use.
- if fd.L1.IsWeak {
- continue
- }
-
// Resolve message field dependency.
switch fd.L1.Kind {
case protoreflect.EnumKind:
@@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) {
switch num {
case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
- case genid.FileDescriptorProto_WeakDependency_field_number:
- fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -502,8 +495,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
- case genid.FieldOptions_Weak_field_number:
- fd.L1.IsWeak = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356b..b08b71830 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeVarint(b)
b = b[m:]
parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+ case genid.GoFeatures_ApiLevel_field_number:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ parent.APILevel = int(v)
+ case genid.GoFeatures_StripEnumPrefix_field_number:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ parent.StripEnumPrefix = int(v)
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
}
@@ -61,6 +69,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
case genid.FeatureSet_JsonFormat_field_number:
parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+ case genid.FeatureSet_EnforceNamingStyle_field_number:
+ // EnforceNamingStyle is enforced in protoc, languages other than C++
+ // are not supposed to do anything with this feature.
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
}
@@ -68,7 +79,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+ case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}
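unmarshalGoFeature and unmarshalFeatureSet above both follow the standard protowire walk: consume a tag, branch on the wire type, then dispatch on the field number. A self-contained sketch of that pattern (assumed example, not upstream code):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Hand-encode field 1 as a varint set to true, purely for illustration.
	b := protowire.AppendVarint(protowire.AppendTag(nil, 1, protowire.VarintType), 1)
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Printf("field %d = %v\n", num, protowire.DecodeBool(v))
		default:
			// Skip wire types this sketch does not handle.
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}
}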
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index ba83fea44..e1b4130bd 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -63,7 +63,7 @@ type Builder struct {
// message declarations in "flattened ordering".
//
// Dependencies are Go types for enums or messages referenced by
- // message fields (excluding weak fields), for parent extended messages of
+ // message fields, for parent extended messages of
// extension fields, for enums or messages referenced by extension fields,
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
index 58372dd34..a06ccabc2 100644
--- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -6,7 +6,7 @@
package flags
// ProtoLegacy specifies whether to enable support for legacy functionality
-// such as MessageSets, weak fields, and various other obscure behavior
+// such as MessageSets, and various other obscure behavior
// that is necessary to maintain backwards compatibility with proto1 or
// the pre-release variants of proto2 and proto3.
//
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index f30ab6b58..39524782a 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -1014,6 +1014,7 @@ const (
FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+ FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style"
FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
@@ -1021,6 +1022,7 @@ const (
FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
)
// Field numbers for google.protobuf.FeatureSet.
@@ -1031,6 +1033,7 @@ const (
FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7
)
// Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1115,19 @@ const (
FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2
)
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+ FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+ FeatureSet_STYLE2024_enum_value = 1
+ FeatureSet_STYLE_LEGACY_enum_value = 2
+)
+
// Names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd0121..d9b9d916a 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
// and the well-known types.
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b4..f5ee7f5c2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,59 @@ import (
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
- GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+ GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+ GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level"
+ GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix"
- GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level"
+ GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
)
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+ GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2
+ GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3
+)
+
+// Full and short names for pb.GoFeatures.APILevel.
+const (
+ GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
+ GoFeatures_APILevel_enum_name = "APILevel"
+)
+
+// Enum values for pb.GoFeatures.APILevel.
+const (
+ GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
+ GoFeatures_API_OPEN_enum_value = 1
+ GoFeatures_API_HYBRID_enum_value = 2
+ GoFeatures_API_OPAQUE_enum_value = 3
+)
+
+// Full and short names for pb.GoFeatures.StripEnumPrefix.
+const (
+ GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
+ GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix"
+)
+
+// Enum values for pb.GoFeatures.StripEnumPrefix.
+const (
+ GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0
+ GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1
+ GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
+ GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3
+)
+
+// Extension numbers
+const (
+ FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
index 693d2e9e1..99bb95baf 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/goname.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -11,15 +11,10 @@ const (
SizeCache_goname = "sizeCache"
SizeCacheA_goname = "XXX_sizecache"
- WeakFields_goname = "weakFields"
- WeakFieldsA_goname = "XXX_weak"
-
UnknownFields_goname = "unknownFields"
UnknownFieldsA_goname = "XXX_unrecognized"
ExtensionFields_goname = "extensionFields"
ExtensionFieldsA_goname = "XXX_InternalExtensions"
ExtensionFieldsB_goname = "XXX_extensions"
-
- WeakFieldPrefix_goname = "XXX_weak_"
)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02ff..bef5a25fb 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
new file mode 100644
index 000000000..224f33930
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+const (
+ NoUnkeyedLiteral_goname = "noUnkeyedLiteral"
+ NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
+
+ BuilderSuffix_goname = "_builder"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b85..9404270de 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
new file mode 100644
index 000000000..6075d6f69
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "strconv"
+ "sync/atomic"
+ "unsafe"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (Export) UnmarshalField(msg any, fieldNum int32) {
+ UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
+}
+
+// Present checks the presence set for a certain field number (zero
+// based, ordered by appearance in original proto file). part is
+// a pointer to the correct element in the bitmask array, num is the
+// field number unaltered. Example (field number 70 -> part =
+// &m.XXX_presence[1], num = 70)
+func (Export) Present(part *uint32, num uint32) bool {
+ // This hook will read an unprotected shadow presence set if
+ // we're unning under the race detector
+ raceDetectHookPresent(part, num)
+ return atomic.LoadUint32(part)&(1<<(num%32)) > 0
+}
+
+// SetPresent adds a field to the presence set. part is a pointer to
+// the relevant element in the array and num is the field number
+// unaltered. size is the number of fields in the protocol
+// buffer.
+func (Export) SetPresent(part *uint32, num uint32, size uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookSetPresent(part, num, presenceSize(size))
+ for {
+ old := atomic.LoadUint32(part)
+ if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
+ return
+ }
+ }
+}
+
+// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
+// It is meant for use by builder methods, where the message is known not
+// to be accessible yet by other goroutines.
+func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookSetPresent(part, num, presenceSize(size))
+ *part |= 1 << (num % 32)
+}
+
+// ClearPresence removes a field from the presence set. part is a
+// pointer to the relevant element in the presence array and num is
+// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookClearPresent(part, num)
+ for {
+ old := atomic.LoadUint32(part)
+ if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
+ return
+ }
+ }
+}
+
+// interfaceToPointer takes a pointer to an empty interface whose value is a
+// pointer type, and converts it into a "pointer" that points to the same
+// target
+func interfaceToPointer(i *any) pointer {
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+func (p pointer) atomicGetPointer() pointer {
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) atomicSetPointer(q pointer) {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
+}
+
+// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
+// pointer) and returns true if the pointed-to pointer is nil (using an
+// atomic load). This function is inlineable and, on x86, just becomes a
+// simple load and compare.
+func (Export) AtomicCheckPointerIsNil(ptr any) bool {
+ return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
+}
+
+// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
+// second is a pointer) and atomically sets the second pointer into location
+// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline
+// (even on x86), so this does not become a simple store on x86.
+func (Export) AtomicSetPointer(dstPtr, valPtr any) {
+ interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
+}
+
+// AtomicLoadPointer loads the pointer at the location pointed at by src,
+// and stores that pointer value into the location pointed at by dst.
+func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
+ *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+}
+
+// AtomicInitializePointer makes ptr and dst point to the same value.
+//
+// If *ptr is a nil pointer, it sets *ptr = *dst.
+//
+// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
+ if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
+ *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+ }
+}
+
+// MessageFieldStringOf returns the field formatted as a string,
+// either as the field name if resolvable otherwise as a decimal string.
+func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
+ fd := md.Fields().ByNumber(n)
+ if fd != nil {
+ return string(fd.Name())
+ }
+ return strconv.Itoa(int(n))
+}
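Present, SetPresent, and ClearPresent above all manipulate one bit of a uint32 presence word with a load/compare-and-swap loop, so concurrent setters cannot drop each other's bits. A stand-alone sketch of that bit arithmetic (hypothetical type, not the generated-code API):

package presencesketch

import "sync/atomic"

type presenceBits uint32

func (p *presenceBits) set(num uint32) {
	for {
		old := atomic.LoadUint32((*uint32)(p))
		if atomic.CompareAndSwapUint32((*uint32)(p), old, old|(1<<(num%32))) {
			return
		}
	}
}

func (p *presenceBits) clear(num uint32) {
	for {
		old := atomic.LoadUint32((*uint32)(p))
		if atomic.CompareAndSwapUint32((*uint32)(p), old, old&^(1<<(num%32))) {
			return
		}
	}
}

func (p *presenceBits) present(num uint32) bool {
	return atomic.LoadUint32((*uint32)(p))&(1<<(num%32)) != 0
}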
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
new file mode 100644
index 000000000..ea276547c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package impl
+
+// There is no additional data as we're not running under race detector.
+type RaceDetectHookData struct{}
+
+// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
+func (presence) raceDetectHookPresent(num uint32) {}
+func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
+func (presence) raceDetectHookClearPresent(num uint32) {}
+func (presence) raceDetectHookAllocAndCopy(src presence) {}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
new file mode 100644
index 000000000..e9a27583a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package impl
+
+// When running under race detector, we add a presence map of bytes, that we can access
+// in the hook functions so that we trigger the race detection whenever we have concurrent
+// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
+// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
+type RaceDetectHookData struct {
+ shadowPresence *[]byte
+}
+
+// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
+// using non-atomic operations.
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
+ sp := make([]byte, size)
+ atomicStoreShadowPresence(&data.shadowPresence, &sp)
+}
+
+func (p presence) raceDetectHookPresent(num uint32) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ _ = (*sp)[num]
+ }
+}
+
+func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp == nil {
+ data.raceDetectHookAlloc(size)
+ sp = atomicLoadShadowPresence(&data.shadowPresence)
+ }
+ (*sp)[num] = 1
+}
+
+func (p presence) raceDetectHookClearPresent(num uint32) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ (*sp)[num] = 0
+
+ }
+}
+
+// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies
+// shadowPresence bytes from src to lazy.
+func (p presence) raceDetectHookAllocAndCopy(q presence) {
+ sData := q.toRaceDetectData()
+ dData := p.toRaceDetectData()
+ if sData == nil {
+ return
+ }
+ srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
+ if srcSp == nil {
+ atomicStoreShadowPresence(&dData.shadowPresence, nil)
+ return
+ }
+ n := len(*srcSp)
+ dSlice := make([]byte, n)
+ atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
+ for i := 0; i < n; i++ {
+ dSlice[i] = (*srcSp)[i]
+ }
+}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ _ = (*sp)[num]
+ }
+}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp == nil {
+ data.raceDetectHookAlloc(size)
+ sp = atomicLoadShadowPresence(&data.shadowPresence)
+ }
+ (*sp)[num] = 1
+}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ (*sp)[num] = 0
+ }
+}
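The shadow-presence slice above exists only to make the race detector useful: the real bitmap is updated with atomics, which -race treats as synchronized, so each operation also performs a deliberately plain read or write on shadow bytes that -race can flag. A hypothetical miniature of the same trick:

package shadowsketch

import "sync/atomic"

type lazyFlag struct {
	bit    uint32
	shadow []byte // plain, unsynchronized accesses feed the race detector
}

func newLazyFlag() *lazyFlag { return &lazyFlag{shadow: make([]byte, 1)} }

func (f *lazyFlag) set() {
	f.shadow[0] = 1 // intentional non-atomic write
	atomic.StoreUint32(&f.bit, 1)
}

func (f *lazyFlag) isSet() bool {
	_ = f.shadow[0] // intentional non-atomic read
	return atomic.LoadUint32(&f.bit) == 1
}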
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index f29e6a8fa..fe2c719ce 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
}
return nil
}
+
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
if mi.extensionOffset.IsValid() {
e := p.Apply(mi.extensionOffset).Extensions()
if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
if !f.isRequired && f.funcs.isInit == nil {
continue
}
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ continue
+ }
+ if f.funcs.isInit != nil {
+ f.mi.init()
+ if f.mi.needsInitCheck {
+ if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
+ lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+ if !lazy.AllowedPartial() {
+ // Nothing to see here, it was checked on unmarshal
+ continue
+ }
+ mi.lazyUnmarshal(p, f.num)
+ }
+ if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+
fptr := p.Apply(f.offset)
if f.isPointer && fptr.Elem().IsNil() {
if f.isRequired {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a20..0d5b546e0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
- fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
- f.lazy.value = f.lazy.fn()
+ panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
- f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
- f.typ = t
- f.lazy = &lazyExtensionValue{fn: fn}
-}
-
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e44..d14d7d93c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -5,15 +5,12 @@
package impl
import (
- "fmt"
"reflect"
- "sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -65,6 +62,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
+ if cf.funcs.isInit == nil {
+ out.initialized = true
+ }
vi.Set(vw)
return out, nil
}
@@ -118,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
}
}
-func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- })
- }
-
- return pointerCoderFuncs{
- size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return 0
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return sizeMessage(m, f.tagsize, opts)
- },
- marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return b, nil
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return appendMessage(b, m, f.wiretag, opts)
- },
- unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
- fs := p.WeakFields()
- m, ok := fs.get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- return unmarshalOutput{}, errUnknown
- }
- m = messageType.New().Interface()
- fs.set(f.num, m)
- }
- return consumeMessage(b, m, wtyp, opts)
- },
- isInit: func(p pointer, f *coderFieldInfo) error {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return nil
- }
- return proto.CheckInitialized(m)
- },
- merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
- sm, ok := src.WeakFields().get(f.num)
- if !ok {
- return
- }
- dm, ok := dst.WeakFields().get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- dm = messageType.New().Interface()
- dst.WeakFields().set(f.num, dm)
- }
- opts.Merge(dm, sm)
- },
- }
-}
-
func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
new file mode 100644
index 000000000..76818ea25
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
@@ -0,0 +1,264 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ mi := getMessageInfo(ft)
+ if mi == nil {
+ panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
+ }
+ switch fd.Kind() {
+ case protoreflect.MessageKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueMessage,
+ marshal: appendOpaqueMessage,
+ unmarshal: consumeOpaqueMessage,
+ isInit: isInitOpaqueMessage,
+ merge: mergeOpaqueMessage,
+ }
+ case protoreflect.GroupKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueGroup,
+ marshal: appendOpaqueGroup,
+ unmarshal: consumeOpaqueGroup,
+ isInit: isInitOpaqueMessage,
+ merge: mergeOpaqueMessage,
+ }
+ }
+ panic("unexpected field kind")
+}
+
+func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
+}
+
+func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ mp := p.AtomicGetPointer()
+ calculatedSize := f.mi.sizePointer(mp, opts)
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := f.mi.marshalAppendPointer(b, mp, opts)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
+}
+
+func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ return nil
+ }
+ return f.mi.checkInitializedPointer(mp)
+}
+
+func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstmp := dst.AtomicGetPointer()
+ if dstmp.IsNil() {
+ dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
+}
+
+func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
+}
+
+func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ return b, err
+}
+
+func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
+ return o, e
+}
+
+func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
+ }
+ mt := ft.Elem().Elem() // *[]*T -> *T
+ mi := getMessageInfo(mt)
+ if mi == nil {
+ panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
+ }
+ switch fd.Kind() {
+ case protoreflect.MessageKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueMessageSlice,
+ marshal: appendOpaqueMessageSlice,
+ unmarshal: consumeOpaqueMessageSlice,
+ isInit: isInitOpaqueMessageSlice,
+ merge: mergeOpaqueMessageSlice,
+ }
+ case protoreflect.GroupKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueGroupSlice,
+ marshal: appendOpaqueGroupSlice,
+ unmarshal: consumeOpaqueGroupSlice,
+ isInit: isInitOpaqueMessageSlice,
+ merge: mergeOpaqueMessageSlice,
+ }
+ }
+ panic("unexpected field kind")
+}
+
+func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.AtomicGetPointer().PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+ }
+ return n
+}
+
+func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.AtomicGetPointer().PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ siz := f.mi.sizePointer(v, opts)
+ b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
+ }
+ return b, nil
+}
+
+func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ sp.AppendPointerSlice(mp)
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ return nil
+ }
+ s := sp.PointerSlice()
+ for _, v := range s {
+ if err := f.mi.checkInitializedPointer(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ ds := dst.AtomicGetPointer()
+ if ds.IsNil() {
+ ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ for _, sp := range src.AtomicGetPointer().PointerSlice() {
+ dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ f.mi.mergePointer(dm, sp, opts)
+ ds.AppendPointerSlice(dm)
+ }
+}
+
+func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.AtomicGetPointer().PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+ }
+ return n
+}
+
+func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.AtomicGetPointer().PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
+ if err != nil {
+ return out, err
+ }
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ sp.AppendPointerSlice(mp)
+ return out, err
+}
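
Illustrative sketch (not part of the vendored change): the opaque group coders above write f.wiretag as the start-group marker and f.wiretag+1 as the end-group marker, which works because protowire.EndGroupType is exactly StartGroupType+1. A minimal standalone check with a made-up field number:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const groupNum = 5 // hypothetical group field number
	start := protowire.EncodeTag(groupNum, protowire.StartGroupType)
	end := protowire.EncodeTag(groupNum, protowire.EndGroupType)
	fmt.Println(end == start+1) // true: why appendOpaqueGroup can emit f.wiretag+1

	// Frame a group holding a single varint subfield (field 1 = 42).
	var b []byte
	b = protowire.AppendVarint(b, start)
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)
	b = protowire.AppendVarint(b, end)

	// ConsumeGroup takes the bytes after the start-group tag and returns the
	// group body without the end-group tag.
	body, n := protowire.ConsumeGroup(groupNum, b[protowire.SizeVarint(start):])
	fmt.Println(n > 0, body)
}
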
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index fb35f0bae..229c69801 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
return 0
}
n := 0
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
@@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o
if opts.Deterministic() {
return appendMapDeterministic(b, mapv, mapi, f, opts)
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
var err error
b = protowire.AppendVarint(b, f.wiretag)
@@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
if !mi.needsInitCheck {
return nil
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := pointerOfValue(iter.Value())
if err := mi.checkInitializedPointer(val); err != nil {
@@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
}
}
} else {
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := mapi.conv.valConv.PBValueOf(iter.Value())
if err := mapi.valFuncs.isInit(val); err != nil {
@@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), iter.Value())
}
@@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
}
@@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
val := reflect.New(f.ft.Elem().Elem())
if f.mi != nil {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
deleted file mode 100644
index 4b15493f2..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-// +build !go1.12
-
-package impl
-
-import "reflect"
-
-type mapIter struct {
- v reflect.Value
- keys []reflect.Value
-}
-
-// mapRange provides a less-efficient equivalent to
-// the Go 1.12 reflect.Value.MapRange method.
-func mapRange(v reflect.Value) *mapIter {
- return &mapIter{v: v}
-}
-
-func (i *mapIter) Next() bool {
- if i.keys == nil {
- i.keys = i.v.MapKeys()
- } else {
- i.keys = i.keys[1:]
- }
- return len(i.keys) > 0
-}
-
-func (i *mapIter) Key() reflect.Value {
- return i.keys[0]
-}
-
-func (i *mapIter) Value() reflect.Value {
- return i.v.MapIndex(i.keys[0])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
deleted file mode 100644
index 0b31b66ea..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package impl
-
-import "reflect"
-
-func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
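
Illustrative sketch (not part of the vendored change): the two deleted files existed only to shim map iteration for Go toolchains older than 1.12; the map coders now call reflect.Value.MapRange directly, which behaves like this standalone example:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange()
	for iter.Next() {
		fmt.Println(iter.Key().String(), iter.Value().Int())
	}
}
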
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb73..f78b57b04 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,6 +32,10 @@ type coderMessageInfo struct {
needsInitCheck bool
isMessageSet bool
numRequiredFields uint8
+
+ lazyOffset offset
+ presenceOffset offset
+ presenceSize presenceSize
}
type coderFieldInfo struct {
@@ -45,12 +49,19 @@ type coderFieldInfo struct {
tagsize int // size of the varint-encoded tag
isPointer bool // true if IsNil may be called on the struct field
isRequired bool // true if field is required
+
+ isLazy bool
+ presenceIndex uint32
}
+const noPresence = 0xffffffff
+
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
mi.sizecacheOffset = invalidOffset
mi.unknownOffset = invalidOffset
mi.extensionOffset = invalidOffset
+ mi.lazyOffset = invalidOffset
+ mi.presenceOffset = si.presenceOffset
if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
mi.sizecacheOffset = si.sizecacheOffset
@@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
},
}
case isOneof:
- fieldOffset = offsetOf(fs, mi.Exporter)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
+ fieldOffset = offsetOf(fs)
default:
- fieldOffset = offsetOf(fs, mi.Exporter)
+ fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &preallocFields[i]
@@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
validation: newFieldValidationInfo(mi, si, fd, ft),
isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
isRequired: fd.Cardinality() == protoreflect.Required,
+
+ presenceIndex: noPresence,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
@@ -189,6 +199,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
}
// getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
new file mode 100644
index 000000000..41c1f74ef
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -0,0 +1,153 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
+ mi.sizecacheOffset = si.sizecacheOffset
+ mi.unknownOffset = si.unknownOffset
+ mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+ mi.extensionOffset = si.extensionOffset
+ mi.lazyOffset = si.lazyOffset
+ mi.presenceOffset = si.presenceOffset
+
+ mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+ fields := mi.Desc.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+
+ fs := si.fieldsByNumber[fd.Number()]
+ if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ ft := fs.Type
+ var wiretag uint64
+ if !fd.IsPacked() {
+ wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+ }
+ var fieldOffset offset
+ var funcs pointerCoderFuncs
+ var childMessage *MessageInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ fieldOffset = offsetOf(fs)
+ case fd.Message() != nil && !fd.IsMap():
+ fieldOffset = offsetOf(fs)
+ if fd.IsList() {
+ childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
+ } else {
+ childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
+ }
+ default:
+ fieldOffset = offsetOf(fs)
+ childMessage, funcs = fieldCoder(fd, ft)
+ }
+ cf := &coderFieldInfo{
+ num: fd.Number(),
+ offset: fieldOffset,
+ wiretag: wiretag,
+ ft: ft,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: funcs,
+ mi: childMessage,
+ validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
+ isPointer: (fd.Cardinality() == protoreflect.Repeated ||
+ fd.Kind() == protoreflect.MessageKind ||
+ fd.Kind() == protoreflect.GroupKind),
+ isRequired: fd.Cardinality() == protoreflect.Required,
+ presenceIndex: noPresence,
+ }
+
+ // TODO: Use presence for all fields.
+ //
+ // In some cases, such as maps, presence means only "might be set" rather
+ // than "is definitely set", but every field should have a presence bit to
+ // permit us to skip over definitely-unset fields at marshal time.
+
+ var hasPresence bool
+ hasPresence, cf.isLazy = usePresenceForField(si, fd)
+
+ if hasPresence {
+ cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
+ }
+
+ mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+ mi.coderFields[cf.num] = cf
+ }
+ for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si.structInfo)
+ }
+ }
+ if messageset.IsMessageSet(mi.Desc) {
+ if !mi.extensionOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+ }
+ if !mi.unknownOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+ }
+ mi.isMessageSet = true
+ }
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+ })
+
+ var maxDense protoreflect.FieldNumber
+ for _, cf := range mi.orderedCoderFields {
+ if cf.num >= 16 && cf.num >= 2*maxDense {
+ break
+ }
+ maxDense = cf.num
+ }
+ mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+ for _, cf := range mi.orderedCoderFields {
+ if int(cf.num) > len(mi.denseCoderFields) {
+ break
+ }
+ mi.denseCoderFields[cf.num] = cf
+ }
+
+ // To preserve compatibility with historic wire output, marshal oneofs last.
+ if mi.Desc.Oneofs().Len() > 0 {
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+ fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+ return order.LegacyFieldOrder(fi, fj)
+ })
+ }
+
+ mi.needsInitCheck = needsInitCheck(mi.Desc)
+ if mi.methods.Marshal == nil && mi.methods.Size == nil {
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Marshal = mi.marshal
+ mi.methods.Size = mi.size
+ }
+ if mi.methods.Unmarshal == nil {
+ mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Unmarshal = mi.unmarshal
+ }
+ if mi.methods.CheckInitialized == nil {
+ mi.methods.CheckInitialized = mi.checkInitialized
+ }
+ if mi.methods.Merge == nil {
+ mi.methods.Merge = mi.merge
+ }
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
+}
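
Illustrative sketch (not part of the vendored change): the maxDense loop above decides how large the dense coder-field lookup table grows; a field number is included while it is below 16 or less than twice the current maximum, and later numbers fall back to the map lookup. Standalone reproduction of the heuristic:

package main

import "fmt"

// maxDense mirrors the vendored heuristic; nums must be sorted ascending.
func maxDense(nums []int32) int32 {
	var max int32
	for _, n := range nums {
		if n >= 16 && n >= 2*max {
			break
		}
		max = n
	}
	return max
}

func main() {
	fmt.Println(maxDense([]int32{1, 2, 3, 100})) // 3: field 100 stays in the sparse map
	fmt.Println(maxDense([]int32{1, 2, 3, 4, 5})) // 5: small numbers all stay dense
}
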
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577bd..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
- v := p.v.Elem().Int()
- return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- v := p.v.Elem().Int()
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(v))
- return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- p.v.Elem().SetInt(int64(v))
- out.n = n
- return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
- size: sizeEnum,
- marshal: appendEnum,
- unmarshal: consumeEnum,
- merge: mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- if p.v.Elem().Int() == 0 {
- return 0
- }
- return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- if p.v.Elem().Int() == 0 {
- return b, nil
- }
- return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if src.v.Elem().Int() != 0 {
- dst.v.Elem().Set(src.v.Elem())
- }
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
- size: sizeEnumNoZero,
- marshal: appendEnumNoZero,
- unmarshal: consumeEnum,
- merge: mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- if p.v.Elem().IsNil() {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
- }
- return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if !src.v.Elem().IsNil() {
- v := reflect.New(dst.v.Type().Elem().Elem())
- v.Elem().Set(src.v.Elem().Elem())
- dst.v.Elem().Set(v)
- }
-}
-
-var coderEnumPtr = pointerCoderFuncs{
- size: sizeEnumPtr,
- marshal: appendEnumPtr,
- unmarshal: consumeEnumPtr,
- merge: mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
- }
- return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- s := p.v.Elem()
- if wtyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return out, errDecode
- }
- for len(b) > 0 {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- b = b[n:]
- }
- out.n = n
- return out, nil
- }
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- out.n = n
- return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
- size: sizeEnumSlice,
- marshal: appendEnumSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return 0
- }
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return b, nil
- }
- b = protowire.AppendVarint(b, f.wiretag)
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- b = protowire.AppendVarint(b, uint64(n))
- for i := 0; i < llen; i++ {
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
- size: sizeEnumPackedSlice,
- marshal: appendEnumPackedSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e23..077712c2c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
// When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55a..f72ddd882 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
- // pref.Value.String never panics, so we go through an interface
+ // protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index 304244a65..e4580b3ac 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
return v
}
func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
- iter := mapRange(ms.v)
+ iter := ms.v.MapRange()
for iter.Next() {
k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
v := ms.valConv.PBValueOf(iter.Value())
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520c2..e0dd21fa5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
AllowPartial: true,
DiscardUnknown: o.DiscardUnknown(),
Resolver: o.resolver,
+
+ NoLazyDecoding: o.NoLazyDecoding(),
}
}
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
return o.flags&protoiface.UnmarshalDiscardUnknown != 0
}
-func (o unmarshalOptions) IsDefault() bool {
- return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+ return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
+}
+
+func (o unmarshalOptions) CanBeLazy() bool {
+ if o.resolver != protoregistry.GlobalTypes {
+ return false
+ }
+ // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
+ return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
}
var lazyUnmarshalOptions = unmarshalOptions{
resolver: protoregistry.GlobalTypes,
- depth: protowire.DefaultRecursionLimit,
+
+ flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
+
+ depth: protowire.DefaultRecursionLimit,
}
type unmarshalOutput struct {
@@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if flags.ProtoLegacy && mi.isMessageSet {
return unmarshalMessageSet(mi, b, p, opts)
}
+
+ lazyDecoding := LazyEnabled() // default
+ if opts.NoLazyDecoding() {
+ lazyDecoding = false // explicitly disabled
+ }
+ if mi.lazyOffset.IsValid() && lazyDecoding {
+ return mi.unmarshalPointerLazy(b, p, groupTag, opts)
+ }
+ return mi.unmarshalPointerEager(b, p, groupTag, opts)
+}
+
+// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
+// The corresponding function for Lazy is in google_lazy.go.
+func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+
initialized := true
var requiredMask uint64
var exts *map[int32]ExtensionField
+
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
start := len(b)
for len(b) > 0 {
// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
+
+ if f.presenceIndex != noPresence {
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
return out, errUnknown
}
if flags.LazyUnmarshalExtensions {
- if opts.IsDefault() && x.canLazy(xt) {
+ if opts.CanBeLazy() && x.canLazy(xt) {
out, valid := skipExtension(b, xi, num, wtyp, opts)
switch valid {
case ValidationValid:
@@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
if n < 0 {
return out, ValidationUnknown
}
+
+ if opts.Validated() {
+ out.initialized = true
+ out.n = n
+ return out, ValidationValid
+ }
+
out, st := xi.validation.mi.validate(v, 0, opts)
out.n = n
return out, st
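
Illustrative sketch (not part of the vendored change): the NoLazyDecoding flag threaded through unmarshalOptions above is reachable from the public API as a field on proto.UnmarshalOptions; wrapperspb is used here only so the example compiles, since lazy decoding itself applies to messages generated with the opaque API:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	data, err := proto.Marshal(wrapperspb.String("hello"))
	if err != nil {
		log.Fatal(err)
	}
	m := &wrapperspb.StringValue{}
	// Force eager decoding, mirroring the GOPROTODEBUG=nolazy escape hatch in lazy.go.
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(data, m); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.GetValue())
}
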
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd21224..b2e212291 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,8 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
- proto "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/internal/protolazy"
+ "google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
e := p.Apply(mi.extensionOffset).Extensions()
size += mi.sizeExtensions(e, opts)
}
+
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ if mi.lazyOffset.IsValid() {
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ }
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.size == nil {
continue
}
fptr := p.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ continue
+ }
+
+ if f.isLazy && fptr.AtomicGetPointer().IsNil() {
+ if lazyFields(opts) {
+ size += (*lazy).SizeField(uint32(f.num))
+ continue
+ } else {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+ size += f.funcs.size(fptr, f, opts)
+ continue
+ }
+
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
return b, err
}
}
+
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ if mi.lazyOffset.IsValid() {
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ }
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.marshal == nil {
continue
}
fptr := p.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ continue
+ }
+ if f.isLazy {
+ // Be careful, this field needs to be read atomically, like for a get
+ if f.isPointer && fptr.AtomicGetPointer().IsNil() {
+ if lazyFields(opts) {
+ b, _ = (*lazy).AppendField(b, uint32(f.num))
+ continue
+ } else {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ continue
+ } else if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ continue
+ }
+
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool {
return opts.flags&piface.MarshalDeterministic == 0
}
+// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
+func lazyFields(opts marshalOptions) bool {
+ // When deterministic marshaling is requested, force an unmarshal for lazy
+ // fields to produce a deterministic result, instead of passing through
+ // bytes lazily that may or may not match what Go Protobuf would produce.
+ return opts.flags&piface.MarshalDeterministic == 0
+}
+
func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
if ext == nil {
return 0
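
Illustrative sketch (not part of the vendored change): lazyFields and fullyLazyExtensions above pass buffered bytes through only when deterministic output is not requested; from the public API, deterministic marshaling is opted into per call. structpb is used here purely as a convenient map-backed message:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{"b": 2.0, "a": 1.0})
	if err != nil {
		log.Fatal(err)
	}
	// Deterministic output sorts map entries and, per the code above, re-encodes
	// lazy fields instead of copying their buffered bytes through.
	out, err := proto.MarshalOptions{Deterministic: true}.Marshal(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes\n", len(out))
}
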
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 000000000..9f6c32a7d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+ return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations and type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+ if mx == nil || my == nil {
+ return mx == my
+ }
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ msx, ok := mx.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ msy, ok := my.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+
+ mi := msx.messageInfo()
+ miy := msy.messageInfo()
+ if mi != miy {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ mi.init()
+ // Compares regular fields
+ // Modified Message.Range code that compares two messages of the same type
+ // while going over the fields.
+ for _, ri := range mi.rangeInfos {
+ var fd protoreflect.FieldDescriptor
+ var vx, vy protoreflect.Value
+
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ hx := ri.has(msx.pointer())
+ hy := ri.has(msy.pointer())
+ if hx != hy {
+ return false
+ }
+ if !hx {
+ continue
+ }
+ fd = ri.fieldDesc
+ vx = ri.get(msx.pointer())
+ vy = ri.get(msy.pointer())
+ case *oneofInfo:
+ fnx := ri.which(msx.pointer())
+ fny := ri.which(msy.pointer())
+ if fnx != fny {
+ return false
+ }
+ if fnx <= 0 {
+ continue
+ }
+ fi := mi.fields[fnx]
+ fd = fi.fieldDesc
+ vx = fi.get(msx.pointer())
+ vy = fi.get(msy.pointer())
+ }
+
+ if !equalValue(fd, vx, vy) {
+ return false
+ }
+ }
+
+ // Compare extensions.
+ // This is more complicated because mx or my could have empty/nil extension maps,
+ // however some populated extension map values are equal to nil extension maps.
+ emx := mi.extensionMap(msx.pointer())
+ emy := mi.extensionMap(msy.pointer())
+ if emx != nil {
+ for k, x := range *emx {
+ xd := x.Type().TypeDescriptor()
+ xv := x.Value()
+ var y ExtensionField
+ ok := false
+ if emy != nil {
+ y, ok = (*emy)[k]
+ }
+ // We need to treat empty lists as equal to nil values
+ if emy == nil || !ok {
+ if xd.IsList() && xv.List().Len() == 0 {
+ continue
+ }
+ return false
+ }
+
+ if !equalValue(xd, xv, y.Value()) {
+ return false
+ }
+ }
+ }
+ if emy != nil {
+ // emy may have extensions emx does not have, need to check them as well
+ for k, y := range *emy {
+ if emx != nil {
+ // emx has the field, so we already checked it
+ if _, ok := (*emx)[k]; ok {
+ continue
+ }
+ }
+ // Empty lists are equal to nil
+ if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+ continue
+ }
+
+ // Can't be equal if the extension is populated
+ return false
+ }
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+ // slow path
+ if fd.Kind() != protoreflect.MessageKind {
+ return vx.Equal(vy)
+ }
+
+ // fast path special cases
+ if fd.IsMap() {
+ if fd.MapValue().Kind() == protoreflect.MessageKind {
+ return equalMessageMap(vx.Map(), vy.Map())
+ }
+ return vx.Equal(vy)
+ }
+
+ if fd.IsList() {
+ return equalMessageList(vx.List(), vy.List())
+ }
+
+ return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+ if mx.Len() != my.Len() {
+ return false
+ }
+ equal := true
+ mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ if !my.Has(k) {
+ equal = false
+ return false
+ }
+ vy := my.Get(k)
+ equal = equalMessage(vx.Message(), vy.Message())
+ return equal
+ })
+ return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalMessage instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+ if lx.Len() != ly.Len() {
+ return false
+ }
+ for i := 0; i < lx.Len(); i++ {
+ // We only operate on messages here since equalValue will not call us in any other case.
+ if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ if len(mx) != len(my) {
+ return false
+ }
+
+ for k, v1 := range mx {
+ if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+ return false
+ }
+ }
+
+ return true
+}
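
Illustrative sketch (not part of the vendored change): the equal function above is installed as mi.methods.Equal and is intended to back proto.Equal for generated messages; callers keep using the public API unchanged:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	c := wrapperspb.String("world")
	fmt.Println(proto.Equal(a, b)) // true
	fmt.Println(proto.Equal(a, c)) // false
}
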
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
new file mode 100644
index 000000000..c7de31e24
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -0,0 +1,433 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math/bits"
+ "os"
+ "reflect"
+ "sort"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/protolazy"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var enableLazy int32 = func() int32 {
+ if os.Getenv("GOPROTODEBUG") == "nolazy" {
+ return 0
+ }
+ return 1
+}()
+
+// EnableLazyUnmarshal enables lazy unmarshaling.
+func EnableLazyUnmarshal(enable bool) {
+ if enable {
+ atomic.StoreInt32(&enableLazy, 1)
+ return
+ }
+ atomic.StoreInt32(&enableLazy, 0)
+}
+
+// LazyEnabled reports whether lazy unmarshalling is currently enabled.
+func LazyEnabled() bool {
+ return atomic.LoadInt32(&enableLazy) != 0
+}
+
+// UnmarshalField unmarshals a field in a message.
+func UnmarshalField(m interface{}, num protowire.Number) {
+ switch m := m.(type) {
+ case *messageState:
+ m.messageInfo().lazyUnmarshal(m.pointer(), num)
+ case *messageReflectWrapper:
+ m.messageInfo().lazyUnmarshal(m.pointer(), num)
+ default:
+ panic(fmt.Sprintf("unsupported wrapper type %T", m))
+ }
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ if f == nil {
+ panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
+ }
+ lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+ if !found && multipleEntries == nil {
+ panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+ }
+ // The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races.
+ // Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
+ fp := pointerOfValue(reflect.New(f.ft))
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+ }
+ } else {
+ mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+ }
+ p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+ opts := lazyUnmarshalOptions
+ opts.flags |= flags
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return errors.New("invalid wire data")
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return errors.New("invalid wire data")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+ if num == f.num {
+ o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+ if err == nil {
+ b = b[o.n:]
+ continue
+ }
+ if err != errUnknown {
+ return err
+ }
+ }
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return errors.New("invalid wire data")
+ }
+ b = b[n:]
+ }
+ return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+ fmi := f.validation.mi
+ if fmi == nil {
+ fd := mi.Desc.Fields().ByNumber(f.num)
+ if fd == nil {
+ return out, ValidationUnknown
+ }
+ messageName := fd.Message().FullName()
+ messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ if err != nil {
+ return out, ValidationUnknown
+ }
+ var ok bool
+ fmi, ok = messageType.(*MessageInfo)
+ if !ok {
+ return out, ValidationUnknown
+ }
+ }
+ fmi.init()
+ switch f.validation.typ {
+ case validationTypeMessage:
+ if wtyp != protowire.BytesType {
+ return out, ValidationWrongWireType
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ out, st := fmi.validate(v, 0, opts)
+ out.n = n
+ return out, st
+ case validationTypeGroup:
+ if wtyp != protowire.StartGroupType {
+ return out, ValidationWrongWireType
+ }
+ out, st := fmi.validate(b, f.num, opts)
+ return out, st
+ default:
+ return out, ValidationUnknown
+ }
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshalling. It expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ initialized := true
+ var requiredMask uint64
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ var lazyIndex []protolazy.IndexEntry
+ var lastNum protowire.Number
+ outOfOrder := false
+ lazyDecode := false
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ if !presence.AnyPresent(mi.presenceSize) {
+ if opts.CanBeLazy() {
+ // If the message contains existing data, we need to merge into it.
+ // Lazy unmarshaling doesn't merge, so only enable it when the
+ // message is empty (has no presence bitmap).
+ lazyDecode = true
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+ (*lazy).SetUnmarshalFlags(opts.flags)
+ if !opts.AliasBuffer() {
+ // Make a copy of the buffer for lazy unmarshaling.
+ // Set the AliasBuffer flag so recursive unmarshal
+ // operations reuse the copy.
+ b = append([]byte{}, b...)
+ opts.flags |= piface.UnmarshalAliasBuffer
+ }
+ (*lazy).SetBuffer(b)
+ }
+ }
+ // Track special handling of lazy fields.
+ //
+ // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+ // In the event that validation for a field fails, this map tracks handling of the field.
+ type lazyAction uint8
+ const (
+ lazyValidateOnly lazyAction = iota // validate the field only
+ lazyUnmarshalNow // eagerly unmarshal the field
+ lazyUnmarshalLater // unmarshal the field after the message is fully processed
+ )
+ var lazyFields map[*coderFieldInfo]lazyAction
+ var exts *map[int32]ExtensionField
+ start := len(b)
+ pos := 0
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, errors.New("invalid field number")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if num != groupTag {
+ return out, errors.New("mismatching end group marker")
+ }
+ groupTag = 0
+ break
+ }
+
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ var n int
+ err := errUnknown
+ discardUnknown := false
+ Field:
+ switch {
+ case f != nil:
+ if f.funcs.unmarshal == nil {
+ break
+ }
+ if f.isLazy && lazyDecode {
+ switch {
+ case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
+ // Attempt to validate this field and leave it for later lazy unmarshaling.
+ o, valid := mi.skipField(b, f, wtyp, opts)
+ switch valid {
+ case ValidationValid:
+ // Skip over the valid field and continue.
+ err = nil
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ requiredMask |= f.validation.requiredBit
+ if !o.initialized {
+ initialized = false
+ }
+ n = o.n
+ break Field
+ case ValidationInvalid:
+ return out, errors.New("invalid proto wire format")
+ case ValidationWrongWireType:
+ break Field
+ case ValidationUnknown:
+ if lazyFields == nil {
+ lazyFields = make(map[*coderFieldInfo]lazyAction)
+ }
+ if presence.Present(f.presenceIndex) {
+ // We were unable to determine if the field is valid or not,
+ // and we've already skipped over at least one instance of this
+ // field. Clear the presence bit (so if we stop decoding early,
+ // we don't leave a partially-initialized field around) and flag
+ // the field for unmarshaling before we return.
+ presence.ClearPresent(f.presenceIndex)
+ lazyFields[f] = lazyUnmarshalLater
+ discardUnknown = true
+ break Field
+ } else {
+ // We were unable to determine if the field is valid or not,
+ // but this is the first time we've seen it. Flag it as needing
+ // eager unmarshaling and fall through to the eager unmarshal case below.
+ lazyFields[f] = lazyUnmarshalNow
+ }
+ }
+ case lazyFields[f] == lazyUnmarshalLater:
+ // This field will be unmarshaled in a separate pass below.
+ // Skip over it here.
+ discardUnknown = true
+ break Field
+ default:
+ // Eagerly unmarshal the field.
+ }
+ }
+ if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
+ if p.Apply(f.offset).AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+ var o unmarshalOutput
+ o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+ n = o.n
+ if err != nil {
+ break
+ }
+ requiredMask |= f.validation.requiredBit
+ if f.funcs.isInit != nil && !o.initialized {
+ initialized = false
+ }
+ if f.presenceIndex != noPresence {
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+ default:
+ // Possible extension.
+ if exts == nil && mi.extensionOffset.IsValid() {
+ exts = p.Apply(mi.extensionOffset).Extensions()
+ if *exts == nil {
+ *exts = make(map[int32]ExtensionField)
+ }
+ }
+ if exts == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+ if err != nil {
+ break
+ }
+ n = o.n
+ if !o.initialized {
+ initialized = false
+ }
+ }
+ if err != nil {
+ if err != errUnknown {
+ return out, err
+ }
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+ u := mi.mutableUnknownBytes(p)
+ *u = protowire.AppendTag(*u, num, wtyp)
+ *u = append(*u, b[:n]...)
+ }
+ }
+ b = b[n:]
+ end := start - len(b)
+ if lazyDecode && f != nil && f.isLazy {
+ if num != lastNum {
+ lazyIndex = append(lazyIndex, protolazy.IndexEntry{
+ FieldNum: uint32(num),
+ Start: uint32(pos),
+ End: uint32(end),
+ })
+ } else {
+ i := len(lazyIndex) - 1
+ lazyIndex[i].End = uint32(end)
+ lazyIndex[i].MultipleContiguous = true
+ }
+ }
+ if num < lastNum {
+ outOfOrder = true
+ }
+ pos = end
+ lastNum = num
+ }
+ if groupTag != 0 {
+ return out, errors.New("missing end group marker")
+ }
+ if lazyFields != nil {
+ // Some fields failed validation, and now need to be unmarshaled.
+ for f, action := range lazyFields {
+ if action != lazyUnmarshalLater {
+ continue
+ }
+ initialized = false
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+ if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
+ return out, err
+ }
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+ }
+ if lazyDecode {
+ if outOfOrder {
+ sort.Slice(lazyIndex, func(i, j int) bool {
+ return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
+ (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
+ lazyIndex[i].Start < lazyIndex[j].Start)
+ })
+ }
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+
+ (*lazy).SetIndex(lazyIndex)
+ }
+ if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+ initialized = false
+ }
+ if initialized {
+ out.initialized = true
+ }
+ out.n = start - len(b)
+ return out, nil
+}
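
Illustrative sketch (not part of the vendored change): the one- and two-byte tag fast path used by unmarshalField and unmarshalPointerLazy above, extracted into a standalone helper and cross-checked against protowire.ConsumeVarint:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// consumeTagFast mirrors the vendored fast path: one- and two-byte varints are
// decoded inline, everything else falls back to protowire.ConsumeVarint.
func consumeTagFast(b []byte) (tag uint64, n int) {
	switch {
	case len(b) >= 1 && b[0] < 0x80:
		return uint64(b[0]), 1
	case len(b) >= 2 && b[1] < 128:
		return uint64(b[0]&0x7f) + uint64(b[1])<<7, 2
	default:
		return protowire.ConsumeVarint(b)
	}
}

func main() {
	// Tag for a hypothetical field 300 with the bytes wire type (two varint bytes).
	b := protowire.AppendVarint(nil, protowire.EncodeTag(300, protowire.BytesType))
	fast, n1 := consumeTagFast(b)
	slow, n2 := protowire.ConsumeVarint(b)
	fmt.Println(fast == slow, n1 == n2) // true true
}
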
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee6..b6849d669 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index bf0b6049b..a51dffbe2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Parent = md
fd.L0.Index = n
- if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
+ if fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
- if fd.L1.IsWeak {
- opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
- }
if fd.L1.EditionFeatures.IsPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 7e65f64f2..8ffdce67d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
if src.IsNil() {
return
}
+
+ var presenceSrc presence
+ var presenceDst presence
+ if mi.presenceOffset.IsValid() {
+ presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
+ presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.merge == nil {
continue
}
sfptr := src.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presenceSrc.Present(f.presenceIndex) {
+ continue
+ }
+ dfptr := dst.Apply(f.offset)
+ if f.isLazy {
+ if sfptr.AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(src, f.num)
+ }
+ if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(dst, f.num)
+ }
+ }
+ f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+ presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ continue
+ }
+
if f.isPointer && sfptr.Elem().IsNil() {
continue
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d45..d50423dcb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,7 +14,6 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -30,8 +29,8 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
- // Exporter must be provided in a purego environment in order to provide
- // access to unexported fields.
+ // Deprecated: Exporter will be removed the next time we bump
+ // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
@@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() {
if mi.initDone == 1 {
return
}
+ if opaqueInitHook(mi) {
+ return
+ }
t := mi.GoReflectType
if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -117,7 +119,6 @@ type (
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
- weakFieldsType = reflect.TypeOf(WeakFields(nil))
unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -126,13 +127,14 @@ var (
type structInfo struct {
sizecacheOffset offset
sizecacheType reflect.Type
- weakOffset offset
- weakType reflect.Type
unknownOffset offset
unknownType reflect.Type
extensionOffset offset
extensionType reflect.Type
+ lazyOffset offset
+ presenceOffset offset
+
fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField
oneofsByName map[protoreflect.Name]reflect.StructField
oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber
@@ -142,9 +144,10 @@ type structInfo struct {
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
si := structInfo{
sizecacheOffset: invalidOffset,
- weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
+ lazyOffset: invalidOffset,
+ presenceOffset: invalidOffset,
fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{},
oneofsByName: map[protoreflect.Name]reflect.StructField{},
@@ -157,24 +160,23 @@ fieldLoop:
switch f := t.Field(i); f.Name {
case genid.SizeCache_goname, genid.SizeCacheA_goname:
if f.Type == sizecacheType {
- si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ si.sizecacheOffset = offsetOf(f)
si.sizecacheType = f.Type
}
- case genid.WeakFields_goname, genid.WeakFieldsA_goname:
- if f.Type == weakFieldsType {
- si.weakOffset = offsetOf(f, mi.Exporter)
- si.weakType = f.Type
- }
case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
- si.unknownOffset = offsetOf(f, mi.Exporter)
+ si.unknownOffset = offsetOf(f)
si.unknownType = f.Type
}
case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
if f.Type == extensionFieldsType {
- si.extensionOffset = offsetOf(f, mi.Exporter)
+ si.extensionOffset = offsetOf(f)
si.extensionType = f.Type
}
+ case "lazyFields", "XXX_lazyUnmarshalInfo":
+ si.lazyOffset = offsetOf(f)
+ case "XXX_presence":
+ si.presenceOffset = offsetOf(f)
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
@@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
mi.init()
fd := mi.Desc.Fields().Get(i)
switch {
- case fd.IsWeak():
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
- return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
default:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
new file mode 100644
index 000000000..dd55e8e00
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -0,0 +1,627 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type opaqueStructInfo struct {
+ structInfo
+}
+
+// isOpaque determines whether a protobuf message type is on the Opaque API. It
+// checks whether the type is a Go struct that protoc-gen-go would generate.
+//
+// This function only detects newly generated messages from the v2
+// implementation of protoc-gen-go. It is unable to classify generated messages
+// that are too old or those that are generated by a different generator
+// such as protoc-gen-gogo.
+func isOpaque(t reflect.Type) bool {
+ // The current detection mechanism is to simply check the first field
+ // for a struct tag with the "protogen" key.
+ if t.Kind() == reflect.Struct && t.NumField() > 0 {
+ pgt := t.Field(0).Tag.Get("protogen")
+ return strings.HasPrefix(pgt, "opaque.")
+ }
+ return false
+}
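A minimal sketch of the detection rule above, using a hypothetical struct. The exact tag value is an assumption; isOpaque only requires that the first field's protogen tag start with "opaque.".

type exampleOpaqueMsg struct {
	state         int32 `protogen:"opaque.v1"` // stand-in for the generated message-state field
	xxx_hidden_ID int32
}

// isOpaque(reflect.TypeOf(exampleOpaqueMsg{})) reports true; a struct whose first
// field lacks the tag, or carries a non-"opaque." value, reports false.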
+
+func opaqueInitHook(mi *MessageInfo) bool {
+ mt := mi.GoReflectType.Elem()
+ si := opaqueStructInfo{
+ structInfo: mi.makeStructInfo(mt),
+ }
+
+ if !isOpaque(mt) {
+ return false
+ }
+
+ defer atomic.StoreUint32(&mi.initDone, 1)
+
+ mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+ fds := mi.Desc.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ var fi fieldInfo
+ usePresence, _ := usePresenceForField(si, fd)
+
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ // Oneofs are no different for opaque.
+ fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fd.IsMap():
+ fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+ case fd.IsList() && fd.Message() == nil && usePresence:
+ fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+ case fd.IsList() && fd.Message() == nil:
+			// Proto3 lists without presence can use the same access methods as the open API.
+ fi = fieldInfoForList(fd, fs, mi.Exporter)
+ case fd.IsList() && usePresence:
+ fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+ case fd.IsList():
+			// Proto3 opaque messages that do not need a presence bitmap.
+			// Different representation than the open struct, but the same logic.
+ fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+ case fd.Message() != nil && usePresence:
+ fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+ case fd.Message() != nil:
+			// Proto3 messages without presence can use the same access methods as the open API.
+ fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+ default:
+ fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+ }
+ mi.fields[fd.Number()] = &fi
+ }
+ mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+ for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+ od := mi.Desc.Oneofs().Get(i)
+ mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
+ }
+
+ mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+ for i := 0; i < fds.Len(); i++ {
+ if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+ mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+ }
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+ mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+ i += od.Fields().Len()
+ } else {
+ mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+ i++
+ }
+ }
+
+ mi.makeExtensionFieldsFunc(mt, si.structInfo)
+ mi.makeUnknownFieldsFunc(mt, si.structInfo)
+ mi.makeOpaqueCoderMethods(mt, si)
+ mi.makeFieldTypes(si.structInfo)
+
+ return true
+}
+
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fd := od.Fields().Get(0)
+ index, _ := presenceIndex(mi.Desc, fd)
+ oi.which = func(p pointer) protoreflect.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ if !mi.present(p, index) {
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ return oi
+ }
+ // Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+ return makeOneofInfo(od, si, x)
+}
+
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+ }
+ fieldOffset := offsetOf(fs)
+ conv := NewConverter(ft, fd)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ // Don't bother checking presence bits, since we need to
+ // look at the map length even if the presence bit is set.
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(pv)
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(fs.Type))
+ }
+ return conv.PBValueOf(v)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(reflect.PtrTo(ft), fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ }
+ mi.setPresent(p, index)
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(pv.Elem())
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ mi.setPresent(p, index)
+ return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ fieldNumber := fd.Number()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ if !mi.present(p, index) {
+ return false
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ return rv.Elem().Len() > 0
+ },
+ clear: func(p pointer) {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ if !mi.present(p, index) {
+ return conv.Zero()
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ val := conv.GoValueOf(v)
+ if val.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ } else {
+ rv.Elem().Set(val.Elem())
+ }
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ if mi.present(p, index) {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ } else {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ return conv.PBValueOf(rv)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ return false
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ return rv.Elem().Len() > 0
+ },
+ clear: func(p pointer) {
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if !sp.IsNil() {
+ rv := sp.AsValueOf(fs.Type.Elem())
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ }
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ return conv.Zero()
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(reflect.New(fs.Type.Elem()))
+ }
+ val := conv.GoValueOf(v)
+ if val.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ } else {
+ rv.Elem().Set(val.Elem())
+ }
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(reflect.New(fs.Type.Elem()))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ nullable := fd.HasPresence()
+ if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+ nullable = true
+ }
+ deref := false
+ if nullable && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ deref = true
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ var getter func(p pointer) protoreflect.Value
+ if !nullable {
+ getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+ } else {
+ getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+ }
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ if nullable {
+ return mi.present(p, index)
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ switch rv.Kind() {
+ case reflect.Bool:
+ return rv.Bool()
+ case reflect.Int32, reflect.Int64:
+ return rv.Int() != 0
+ case reflect.Uint32, reflect.Uint64:
+ return rv.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() != 0 || math.Signbit(rv.Float())
+ case reflect.String, reflect.Slice:
+ return rv.Len() > 0
+ default:
+ panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+ }
+ },
+ clear: func(p pointer) {
+ if nullable {
+ mi.clearPresent(p, index)
+ }
+ // This is only valuable for bytes and strings, but we do it unconditionally.
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: getter,
+ // TODO: Implement unsafe fast path for set?
+ set: func(p pointer, v protoreflect.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if deref {
+ if rv.IsNil() {
+ rv.Set(reflect.New(ft))
+ }
+ rv = rv.Elem()
+ }
+
+ rv.Set(conv.GoValueOf(v))
+ if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+ rv.Set(emptyBytes)
+ }
+ if nullable {
+ mi.setPresent(p, index)
+ }
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ fieldNumber := fd.Number()
+ elemType := fs.Type.Elem()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ return mi.present(p, index)
+ },
+ clear: func(p pointer) {
+ mi.clearPresent(p, index)
+ p.Apply(fieldOffset).AtomicSetNilPointer()
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ fp := p.Apply(fieldOffset)
+ mp := fp.AtomicGetPointer()
+ if mp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ mp = fp.AtomicGetPointer()
+ }
+ rv := mp.AsValueOf(elemType)
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ val := pointerOfValue(conv.GoValueOf(v))
+ if val.IsNil() {
+ panic("invalid nil pointer")
+ }
+ p.Apply(fieldOffset).AtomicSetPointer(val)
+ mi.setPresent(p, index)
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ fp := p.Apply(fieldOffset)
+ mp := fp.AtomicGetPointer()
+ if mp.IsNil() {
+ if mi.present(p, index) {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ mp = fp.AtomicGetPointer()
+ } else {
+ mp = pointerOfValue(conv.GoValueOf(conv.New()))
+ fp.AtomicSetPointer(mp)
+ mi.setPresent(p, index)
+ }
+ }
+ return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+ },
+ newMessage: func() protoreflect.Message {
+ return conv.New().Message()
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+ pvalueList
+ setPresence func(bool)
+}
+type pvalueList interface {
+ protoreflect.List
+ //Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+ list.pvalueList.Append(v)
+ list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+ list.pvalueList.Truncate(i)
+ list.setPresence(i > 0)
+}
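A sketch of how such a wrapper could be wired up; wrapWithPresence is a hypothetical helper, not part of this package, and the real call sites may differ.

// Hypothetical wiring: wrap an existing protoreflect.List so that appending
// marks the field present and truncating to empty clears the bit again.
func wrapWithPresence(inner protoreflect.List, mi *MessageInfo, p pointer, index uint32) protoreflect.List {
	return presenceList{
		pvalueList: inner,
		setPresence: func(on bool) {
			if on {
				mi.setPresent(p, index)
			} else {
				mi.clearPresent(p, index)
			}
		},
	}
}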
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+ found := false
+ var index, numIndices uint32
+ for i := 0; i < md.Fields().Len(); i++ {
+ f := md.Fields().Get(i)
+ if f == fd {
+ found = true
+ index = numIndices
+ }
+ if f.ContainingOneof() == nil || isLastOneofField(f) {
+ numIndices++
+ }
+ }
+ if !found {
+ panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+ }
+ return index, presenceSize(numIndices)
+}
+
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+ fields := fd.ContainingOneof().Fields()
+ return fields.Get(fields.Len()-1) == fd
+}
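A worked example of the slot assignment (hypothetical message, shown only to illustrate the counting rule: all members of a real oneof share one slot, and only the last member advances the counter):

// message M {
//   optional int32 a = 1;  // presence index 0
//   oneof o {
//     int32 b = 2;         // presence index 1
//     int32 c = 3;         // presence index 1 (last oneof member; counter advances here)
//   }
//   optional int32 d = 4;  // presence index 2
// }
// presenceIndex reports presenceSize 3 for every field of M.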
+
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+ p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+ p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+ return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
+
+// usePresenceForField implements the somewhat intricate logic of when
+// the presence bitmap is used for a field. The main logic is that a
+// field that is optional or that can be lazy will use the presence
+// bit, but for proto2, also maps have a presence bit. It also records
+// if the field can ever be lazy, which is true if we have a
+// lazyOffset and the field is a message or a slice of messages. A
+// field that is lazy will always need a presence bit. Oneofs are not
+// lazy and do not use presence, unless they are a synthetic oneof,
+// which is a proto3 optional field. For proto3 optionals, we use the
+// presence and they can also be lazy when applicable (a message).
+func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+ hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
+
+ // Non-oneof scalar fields with explicit field presence use the presence array.
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ return false, false
+ case fd.IsMap():
+ return false, false
+ case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+ return hasLazyField, hasLazyField
+ default:
+ return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
+ }
+}
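An illustration of how the rules above resolve for a few common field shapes (the shapes are hypothetical; "lazy" means fd.IsLazy() reports true):

// member of a real (non-synthetic) oneof      -> usePresence=false, canBeLazy=false
// map field                                   -> usePresence=false, canBeLazy=false
// message or group field marked lazy          -> usePresence=true,  canBeLazy=true
// message field that is not lazy              -> usePresence=false, canBeLazy=false
// proto3 "optional" scalar (synthetic oneof)  -> usePresence=true,  canBeLazy=false
// plain proto3 scalar without presence        -> usePresence=false, canBeLazy=false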
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 000000000..a69825699
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if fd.Kind() == protoreflect.EnumKind {
+ // Enums for nullable opaque types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bool()
+ return protoreflect.ValueOfBool(*x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32()
+ return protoreflect.ValueOfInt32(*x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32()
+ return protoreflect.ValueOfUint32(*x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64()
+ return protoreflect.ValueOfInt64(*x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64()
+ return protoreflect.ValueOfUint64(*x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32()
+ return protoreflect.ValueOfFloat32(*x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64()
+ return protoreflect.ValueOfFloat64(*x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ if len(**x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(**x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(**x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index ecb4623d7..0d20132fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
- case fd.IsWeak():
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
@@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
case fd.IsList():
if fd.Enum() != nil || fd.Message() != nil {
ft = fs.Type.Elem()
+
+ if ft.Kind() == reflect.Slice {
+ ft = ft.Elem()
+ }
+
}
isMessage = fd.Message() != nil
case fd.Enum() != nil:
@@ -214,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
case fd.Message() != nil:
ft = fs.Type
- if fd.IsWeak() {
- ft = nil
- }
isMessage = true
}
if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b19..68d4ae32e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@ import (
"fmt"
"math"
"reflect"
- "sync"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
@@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
isMessage := fd.Message() != nil
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
// NOTE: The logic below intentionally assumes that oneof fields are
// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(reflect.PtrTo(ft), fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
ft := fs.Type
nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+ var getter func(p pointer) protoreflect.Value
if nullable {
if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
// This never occurs for generated message types.
@@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+
+ // Generate specialized getter functions to avoid going through reflect.Value
+ if nullable {
+ getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+ } else {
+ getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+ }
- // TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
- rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
- return !rv.IsNil()
+ return !p.Apply(fieldOffset).Elem().IsNil()
}
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
@@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) protoreflect.Value {
- if p.IsNil() {
- return conv.Zero()
- }
- rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
- if nullable {
- if rv.IsNil() {
- return conv.Zero()
- }
- if rv.Kind() == reflect.Ptr {
- rv = rv.Elem()
- }
- }
- return conv.PBValueOf(rv)
- },
+ get: getter,
+ // TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable && rv.Kind() == reflect.Ptr {
@@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
- if !flags.ProtoLegacy {
- panic("no support for proto1 weak fields")
- }
-
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
- }
- })
- }
-
- num := fd.Number()
- return fieldInfo{
- fieldDesc: fd,
- has: func(p pointer) bool {
- if p.IsNil() {
- return false
- }
- _, ok := p.Apply(weakOffset).WeakFields().get(num)
- return ok
- },
- clear: func(p pointer) {
- p.Apply(weakOffset).WeakFields().clear(num)
- },
- get: func(p pointer) protoreflect.Value {
- lazyInit()
- if p.IsNil() {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- m, ok := p.Apply(weakOffset).WeakFields().get(num)
- if !ok {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- set: func(p pointer, v protoreflect.Value) {
- lazyInit()
- m := v.Message()
- if m.Descriptor() != messageType.Descriptor() {
- if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
- panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
- }
- panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
- }
- p.Apply(weakOffset).WeakFields().set(num, m.Interface())
- },
- mutable: func(p pointer) protoreflect.Value {
- lazyInit()
- fs := p.Apply(weakOffset).WeakFields()
- m, ok := fs.get(num)
- if !ok {
- m = messageType.New().Interface()
- fs.set(num, m)
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- newMessage: func() protoreflect.Message {
- lazyInit()
- return messageType.New()
- },
- newField: func() protoreflect.Value {
- lazyInit()
- return protoreflect.ValueOfMessage(messageType.New())
- },
- }
-}
-
func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if fs.Type.Kind() != reflect.Ptr {
- return !isZero(rv)
+ return !rv.IsZero()
}
return !rv.IsNil()
},
@@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
} else {
fs := si.oneofsByName[od.Name()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
return oi
}
-
-// isZero is identical to reflect.Value.IsZero.
-// TODO: Remove this when Go1.13 is the minimally supported Go version.
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return math.Float64bits(v.Float()) == 0
- case reflect.Complex64, reflect.Complex128:
- c := v.Complex()
- return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !isZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
- return v.IsNil()
- case reflect.String:
- return v.Len() == 0
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- default:
- panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
- }
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 000000000..af5e063a1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if fd.Kind() == protoreflect.EnumKind {
+ elemType := fs.Type.Elem()
+ // Enums for nullable types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv.Elem())
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).BoolPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfBool(**x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfInt32(**x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfUint32(**x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfInt64(**x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfUint64(**x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfFloat32(**x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfFloat64(**x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ if len(**x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(**x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(**x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ if len(*x) == 0 {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if fd.Kind() == protoreflect.EnumKind {
+ // Enums for non nullable types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bool()
+ return protoreflect.ValueOfBool(*x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32()
+ return protoreflect.ValueOfInt32(*x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32()
+ return protoreflect.ValueOfUint32(*x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64()
+ return protoreflect.ValueOfInt64(*x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64()
+ return protoreflect.ValueOfUint64(*x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32()
+ return protoreflect.ValueOfFloat32(*x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64()
+ return protoreflect.ValueOfFloat64(*x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).String()
+ if len(*x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).String()
+ return protoreflect.ValueOfString(*x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
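The getters generated above keep the hot read path on typed pointer loads instead of reflect.Value round trips. A standalone sketch of that technique, using hypothetical types unrelated to this package:

package main

import (
	"fmt"
	"unsafe"
)

type demo struct{ A, B int32 }

// int32Getter returns a closure that reads an int32 at a fixed struct offset,
// mirroring how a fieldInfo getter is built once and then reused on every call.
func int32Getter(off uintptr) func(unsafe.Pointer) int32 {
	return func(p unsafe.Pointer) int32 {
		return *(*int32)(unsafe.Pointer(uintptr(p) + off))
	}
}

func main() {
	get := int32Getter(unsafe.Offsetof(demo{}.B))
	d := demo{A: 1, B: 42}
	fmt.Println(get(unsafe.Pointer(&d))) // 42
}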
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a2..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
- index int
- export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
- if len(f.Index) != 1 {
- panic("embedded structs are not supported")
- }
- if f.PkgPath == "" {
- return offset{index: f.Index[0]} // field is already exported
- }
- if x == nil {
- panic("exporter must be provided for unexported field")
- }
- return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
- return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
- return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
- return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
- if f.export != nil {
- if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
- return pointer{v: v}
- }
- }
- return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
- if got := p.v.Type().Elem(); got != t {
- panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
- }
- return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
- return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
- return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
- return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
- // TODO: reconsider this
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
- sp := p.v.Elem()
- sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
- p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
-func (ms *messageState) pointer() pointer { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
- once sync.Once
- m messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
- m.once.Do(func() {
- m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
- m.m.mi = mi
- })
- return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d8..62f8bf663 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
import (
"reflect"
"sync/atomic"
"unsafe"
+
+ "google.golang.org/protobuf/internal/protolazy"
)
const UnsafeEnabled = true
@@ -23,7 +22,7 @@ type Pointer unsafe.Pointer
type offset uintptr
// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
+func offsetOf(f reflect.StructField) offset {
return offset(f.Offset)
}
@@ -112,8 +111,14 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+ return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+ return presence{P: p.p}
+}
func (p pointer) Elem() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 000000000..38aa7b7dc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+ return v
+ }
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+ return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+ return p
+ }
+ return mi.Get()
+}
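AtomicSetPointerIfNil is the allocate-once primitive the lazy accessors above rely on: each racing caller offers a freshly allocated value and all of them settle on the single winner. The same idiom expressed with only the standard library (types are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var slot atomic.Pointer[int] // stands in for the lazily allocated field
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fresh := new(int)
			// Equivalent of AtomicSetPointerIfNil: install fresh only while the
			// slot is still nil; otherwise adopt whatever value won the race.
			if !slot.CompareAndSwap(nil, fresh) {
				fresh = slot.Load()
			}
			_ = fresh
		}()
	}
	wg.Wait()
	fmt.Println(slot.Load() != nil) // true: exactly one allocation is retained
}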
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
new file mode 100644
index 000000000..914cb1ded
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -0,0 +1,142 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// presenceSize represents the size of a presence set, which should be the largest index of the set+1
+type presenceSize uint32
+
+// presence is the internal representation of the bitmap array in a generated protobuf
+type presence struct {
+ // This is a pointer to the beginning of an array of uint32
+ P unsafe.Pointer
+}
+
+func (p presence) toElem(num uint32) (ret *uint32) {
+ const (
+ bitsPerByte = 8
+ siz = unsafe.Sizeof(*ret)
+ )
+ // p.P points to an array of uint32, num is the bit in this array that the
+ // caller wants to check/manipulate. Calculate the index in the array that
+ // contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
+ offset := uintptr(num) / (siz * bitsPerByte) * siz
+ return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
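A quick worked example of the word selection above: bit 76 lives in word 76/32 = 2 and is tested with mask 1<<(76%32) = 1<<12. In plain Go, independent of the internal presence type:

// Standalone illustration of the same arithmetic.
func wordAndMask(num uint32) (word, mask uint32) {
	return num / 32, 1 << (num % 32)
}

// wordAndMask(76) == (2, 1<<12)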
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+ if p.P == nil {
+ return false
+ }
+ return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+ Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+ Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+ Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+ if p.P == nil {
+ return 0
+ }
+ return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It loads a new word into the cache if the bit falls outside the
+// cached word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+ if num/32 != *cachedElement {
+ o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+ q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+ *current = atomic.LoadUint32(q)
+ *cachedElement = num / 32
+ }
+ return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+ n := uintptr((size + 31) / 32)
+ for j := uintptr(0); j < n; j++ {
+ o := j * unsafe.Sizeof(uint32(0))
+ q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+ b := atomic.LoadUint32(q)
+ if b > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// cannot have a struct type containing the array and the
+// RaceDetectHookData. Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+ var template struct {
+ d RaceDetectHookData
+ a [1]uint32
+ }
+ o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+ return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+ return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+ atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+ var template struct {
+ d RaceDetectHookData
+ a [1]uint32
+ }
+ o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+ return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
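
The word/bit arithmetic used throughout presence.go (num/32 selects the uint32 word, num%32 selects the bit within it) can be sketched without unsafe pointer math. The bitmap type below is a standalone illustration under that reading, not the vendored API.

package main

import "fmt"

// bitmap is a plain []uint32 presence set: word num/32 holds bit num%32.
type bitmap []uint32

func (b bitmap) set(num uint32)          { b[num/32] |= 1 << (num % 32) }
func (b bitmap) present(num uint32) bool { return b[num/32]&(1<<(num%32)) != 0 }

func main() {
	b := make(bitmap, 3) // room for indices 0..95
	b.set(76)            // word 76/32 = 2, bit 76%32 = 12 (cf. the toElem comment)
	fmt.Println(b.present(76), b.present(77)) // true false
}
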
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bbd7..7b2995dde 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@ const (
// ValidationValid indicates that unmarshaling the message will succeed.
ValidationValid
+
+ // ValidationWrongWireType indicates that a validated field does not have
+ // the expected wire type.
+ ValidationWrongWireType
)
func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
+
+ if ft.Kind() == reflect.Ptr {
+ // Repeated opaque message fields are *[]*T.
+ ft = ft.Elem()
+ }
+
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
+
+ if ft.Kind() == reflect.Ptr {
+ // Repeated opaque message fields are *[]*T.
+ ft = ft.Elem()
+ }
+
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
@@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
- if !fd.IsWeak() {
- vi.mi = getMessageInfo(ft)
- }
+ vi.mi = getMessageInfo(ft)
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
@@ -304,26 +318,6 @@ State:
}
if f != nil {
vi = f.validation
- if vi.typ == validationTypeMessage && vi.mi == nil {
- // Probable weak field.
- //
- // TODO: Consider storing the results of this lookup somewhere
- // rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num)
- if fd == nil || !fd.IsWeak() {
- break
- }
- messageName := fd.Message().FullName()
- messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
- switch err {
- case nil:
- vi.mi, _ = messageType.(*MessageInfo)
- case protoregistry.NotFound:
- vi.typ = validationTypeBytes
- default:
- return out, ValidationUnknown
- }
- }
break
}
// Possible extension field.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
deleted file mode 100644
index eb79a7ba9..000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// weakFields adds methods to the exported WeakFields type for internal use.
-//
-// The exported type is an alias to an unnamed type, so methods can't be
-// defined directly on it.
-type weakFields WeakFields
-
-func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
- m, ok := w[int32(num)]
- return m, ok
-}
-
-func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
-
-func (w *weakFields) clear(num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
- _, ok := w[int32(num)]
- return ok
-}
-
-func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
- if m, ok := w[int32(num)]; ok {
- return m
- }
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- return mt.Zero().Interface()
-}
-
-func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
- if m != nil {
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- if mt != m.ProtoReflect().Type() {
- panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
- }
- }
- if m == nil || !m.ProtoReflect().IsValid() {
- delete(*w, int32(num))
- return
- }
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
new file mode 100644
index 000000000..82e5cab4a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
@@ -0,0 +1,364 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+ Buf []byte
+ Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+ return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+ i := b.Pos
+ l := len(b.Buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ v := b.Buf[i]
+ i++
+ x |= (uint64(v) & 0x7F) << shift
+ if v < 0x80 {
+ b.Pos = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// decodeVarint decodes a varint at the current position
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+ i := b.Pos
+ buf := b.Buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ b.Pos++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return b.DecodeVarintSlow()
+ }
+
+ var v uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) & 127
+ i++
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 35
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 42
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 49
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 56
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 63
+ if v < 128 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ b.Pos = i
+ return
+}
+
+// decodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+ i := b.Pos
+ buf := b.Buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ b.Pos++
+ return uint32(buf[i]), nil
+ } else if len(buf)-i < 5 {
+ v, err := b.DecodeVarintSlow()
+ return uint32(v), err
+ }
+
+ var v uint32
+ // we already checked the first byte
+ x = uint32(buf[i]) & 127
+ i++
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ b.Pos = i
+ return
+}
+
+// skipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+ wireType := tag & 0x7
+ switch protowire.Type(wireType) {
+ case protowire.VarintType:
+ err = b.SkipVarint()
+ case protowire.Fixed64Type:
+ err = b.SkipFixed64()
+ case protowire.BytesType:
+ var n uint32
+ n, err = b.DecodeVarint32()
+ if err == nil {
+ err = b.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ err = b.SkipGroup(tag)
+ case protowire.Fixed32Type:
+ err = b.SkipFixed32()
+ default:
+ err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+ }
+ return
+}
+
+// skipGroup skips a group with the specified tag. It executes efficiently using a tag stack
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+ tagStack := make([]uint32, 0, 16)
+ tagStack = append(tagStack, tag)
+ var n uint32
+ for len(tagStack) > 0 {
+ tag, err = b.DecodeVarint32()
+ if err != nil {
+ return err
+ }
+ switch protowire.Type(tag & 0x7) {
+ case protowire.VarintType:
+ err = b.SkipVarint()
+ case protowire.Fixed64Type:
+ err = b.Skip(8)
+ case protowire.BytesType:
+ n, err = b.DecodeVarint32()
+ if err == nil {
+ err = b.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ tagStack = append(tagStack, tag)
+ case protowire.Fixed32Type:
+ err = b.SkipFixed32()
+ case protowire.EndGroupType:
+ if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+ tagStack = tagStack[:len(tagStack)-1]
+ } else {
+ err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+ protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// skipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+ i := b.Pos
+
+ if len(b.Buf)-i < 10 {
+ // Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+ if _, err := b.DecodeVarintSlow(); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ return errOverflow
+
+out:
+ b.Pos = i + 1
+ return nil
+}
+
+// skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+ if len(b.Buf) < b.Pos+n {
+ return io.ErrUnexpectedEOF
+ }
+ b.Pos += n
+ return
+}
+
+// skipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+ return b.Skip(8)
+}
+
+// skipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+ return b.Skip(4)
+}
+
+// skipBytes skips a set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+ n, err := b.DecodeVarint32()
+ if err != nil {
+ return err
+ }
+ return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+ return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+ return len(b.Buf) - b.Pos
+}
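
The unrolled DecodeVarint/DecodeVarint32 loops above read base-128 varints: each byte contributes its low seven bits, and a set 0x80 bit means another byte follows. A standalone sketch using the public protowire package to show the same decoding on a concrete encoding; the sample bytes are illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xAC 0x02
	// (low 7 bits first, the 0x80 bit means "more bytes follow").
	buf := []byte{0xAC, 0x02}
	v, n := protowire.ConsumeVarint(buf)
	fmt.Println(v, n) // 300 2
}
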
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 000000000..ff4d4834b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+ "fmt"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is the structure for an index of the fields in a message of a
+// proto (not descending to sub-messages)
+type IndexEntry struct {
+ FieldNum uint32
+ // first byte of this tag/field
+ Start uint32
+ // first byte after a contiguous sequence of bytes for this tag/field, which could
+ // include a single encoding of the field, or multiple encodings for the field
+ End uint32
+ // True if this protobuf segment includes multiple encodings of the field
+ MultipleContiguous bool
+}
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+ // Index of fields and their positions in the protobuf for this
+ // message. Make index be a pointer to a slice so it can be updated
+ // atomically. The index pointer is only set once (lazily when/if
+ // the index is first needed), and must always be SET and LOADED
+ // ATOMICALLY.
+ index *[]IndexEntry
+ // The protobuf associated with this lazily decoded message. It is
+ // only set during proto.Unmarshal(). It doesn't need to be set and
+ // loaded atomically, since any simultaneous set (Unmarshal) and read
+ // (during a get) would already be a race in the app code.
+ Protobuf []byte
+ // The flags present when Unmarshal was originally called for this particular message
+ unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+ return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+ lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+ lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+ return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+ return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+ return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the index
+// array and an error.
+func buildIndex(buf []byte) ([]IndexEntry, error) {
+ index := make([]IndexEntry, 0, 16)
+ var lastProtoFieldNum uint32
+ var outOfOrder bool
+
+ var r BufferReader = NewBufferReader(buf)
+
+ for !r.Done() {
+ var tag uint32
+ var err error
+ var curPos = r.Pos
+ // INLINED: tag, err = r.DecodeVarint32()
+ {
+ i := r.Pos
+ buf := r.Buf
+
+ if i >= len(buf) {
+ return nil, errOutOfBounds
+ } else if buf[i] < 0x80 {
+ r.Pos++
+ tag = uint32(buf[i])
+ } else if r.Remaining() < 5 {
+ var v uint64
+ v, err = r.DecodeVarintSlow()
+ tag = uint32(v)
+ } else {
+ var v uint32
+ // we already checked the first byte
+ tag = uint32(buf[i]) & 127
+ i++
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ return nil, errOutOfBounds
+
+ done:
+ r.Pos = i
+ }
+ }
+ // DONE: tag, err = r.DecodeVarint32()
+
+ fieldNum := protoFieldNumber(tag)
+ if fieldNum < lastProtoFieldNum {
+ outOfOrder = true
+ }
+
+ // Skip the current value -- will skip over an entire group as well.
+ // INLINED: err = r.SkipValue(tag)
+ wireType := tag & 0x7
+ switch protowire.Type(wireType) {
+ case protowire.VarintType:
+ // INLINED: err = r.SkipVarint()
+ i := r.Pos
+
+ if len(r.Buf)-i < 10 {
+ // Use DecodeVarintSlow() to skip while
+ // checking for buffer overflow, but ignore result
+ _, err = r.DecodeVarintSlow()
+ goto out2
+ }
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ return nil, errOverflow
+ out:
+ r.Pos = i + 1
+ // DONE: err = r.SkipVarint()
+ case protowire.Fixed64Type:
+ err = r.SkipFixed64()
+ case protowire.BytesType:
+ var n uint32
+ n, err = r.DecodeVarint32()
+ if err == nil {
+ err = r.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ err = r.SkipGroup(tag)
+ case protowire.Fixed32Type:
+ err = r.SkipFixed32()
+ default:
+ err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+ }
+ // DONE: err = r.SkipValue(tag)
+
+ out2:
+ if err != nil {
+ return nil, err
+ }
+ if fieldNum != lastProtoFieldNum {
+ index = append(index, IndexEntry{FieldNum: fieldNum,
+ Start: uint32(curPos),
+ End: uint32(r.Pos)},
+ )
+ } else {
+ index[len(index)-1].End = uint32(r.Pos)
+ index[len(index)-1].MultipleContiguous = true
+ }
+ lastProtoFieldNum = fieldNum
+ }
+ if outOfOrder {
+ sort.Slice(index, func(i, j int) bool {
+ return index[i].FieldNum < index[j].FieldNum ||
+ (index[i].FieldNum == index[j].FieldNum &&
+ index[i].Start < index[j].Start)
+ })
+ }
+ return index, nil
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ size += int(entry.End - entry.Start)
+ }
+ return size
+ }
+ if !found {
+ return 0
+ }
+ return int(end - start)
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+ }
+ return b, true
+ }
+ if !found {
+ return nil, false
+ }
+ b = append(b, lazy.Protobuf[start:end]...)
+ return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+ atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
+// (including the protobuf) and returns startOffset/endOffset/found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+ if lazy.Protobuf == nil {
+ // There is no backing protobuf for this message -- it was made from a builder
+ return 0, 0, false, false, nil
+ }
+ index := atomicLoadIndex(&lazy.index)
+ if index == nil {
+ r, err := buildIndex(lazy.Protobuf)
+ if err != nil {
+ panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+ }
+// lazy.index is a pointer to the slice returned by buildIndex
+ index = &r
+ atomicStoreIndex(&lazy.index, index)
+ }
+ return lookupField(index, fieldNum)
+}
+
+// lookupField returns the offset at which the indicated field starts using
+// the index, offset immediately after field ends (including all instances of
+// a repeated field), and bools indicating if field was found and if there
+// are multiple encodings of the field in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries. If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+ // The pointer indexp to the index was already loaded atomically.
+ // The slice is uniquely associated with the pointer, so it doesn't
+ // need to be loaded atomically.
+ index := *indexp
+ for i, entry := range index {
+ if fieldNum == entry.FieldNum {
+ if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+ // Handle the uncommon case where there are
+ // repeated entries for the same field which
+ // are not contiguous in the protobuf.
+ multiple := make([]IndexEntry, 1, 2)
+ multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+ i++
+ for i < len(index) && index[i].FieldNum == fieldNum {
+ multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+ i++
+ }
+ return 0, 0, false, false, multiple
+
+ }
+ return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+ }
+ if fieldNum < entry.FieldNum {
+ return 0, 0, false, false, nil
+ }
+ }
+ return 0, 0, false, false, nil
+}
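
buildIndex above walks the wire format once and records, per field number, the byte range its contiguous encodings occupy. A simplified standalone sketch of the same idea using the public protowire package instead of the inlined varint/skip code; the entry type and helper are illustrative, not the vendored API.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

type entry struct {
	fieldNum   protowire.Number
	start, end int
}

// buildIndex records the [start, end) byte range of each field occurrence.
func buildIndex(buf []byte) ([]entry, error) {
	var index []entry
	for pos := 0; pos < len(buf); {
		num, typ, n := protowire.ConsumeTag(buf[pos:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		m := protowire.ConsumeFieldValue(num, typ, buf[pos+n:])
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		index = append(index, entry{num, pos, pos + n + m})
		pos += n + m
	}
	return index, nil
}

func main() {
	// Field 1: varint 150; field 2: bytes "hi".
	var buf []byte
	buf = protowire.AppendTag(buf, 1, protowire.VarintType)
	buf = protowire.AppendVarint(buf, 150)
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendBytes(buf, []byte("hi"))
	idx, _ := buildIndex(buf)
	fmt.Println(idx) // [{1 0 3} {2 3 7}]
}
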
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 000000000..dc2a64ca6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+ return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f3338..000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
- return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
- return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 60166f2ba..42dd6f70c 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
package strs
import (
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index a008acd09..000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
-
-package strs
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
- src := (*sliceHeader)(unsafe.Pointer(&b))
- dst := (*stringHeader)(unsafe.Pointer(&s))
- dst.Data = src.Data
- dst.Len = src.Len
- return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
- src := (*stringHeader)(unsafe.Pointer(&s))
- dst := (*sliceHeader)(unsafe.Pointer(&b))
- dst.Data = src.Data
- dst.Len = src.Len
- dst.Cap = src.Len
- return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
- buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
- n := len(prefix) + len(".") + len(name)
- if len(prefix) == 0 {
- n -= len(".")
- }
- sb.grow(n)
- sb.buf = append(sb.buf, prefix...)
- sb.buf = append(sb.buf, '.')
- sb.buf = append(sb.buf, name...)
- return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
- sb.grow(len(b))
- sb.buf = append(sb.buf, b...)
- return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
- if cap(sb.buf)-len(sb.buf) >= n {
- return
- }
-
- // Unlike strings.Builder, we do not need to copy over the contents
- // of the old buffer since our builder provides no API for
- // retrieving previously created strings.
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
- return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f686..aac1cb18a 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 34
- Patch = 2
+ Minor = 36
+ Patch = 6
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index d75a6534c..4cbf1aeaf 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
@@ -47,6 +46,12 @@ type UnmarshalOptions struct {
// RecursionLimit limits how deeply messages may be nested.
// If zero, a default limit is applied.
RecursionLimit int
+
+ //
+ // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+ // default. Lazy decoding only affects submessages (annotated with [lazy =
+ // true] in the .proto file) within messages that use the Opaque API.
+ NoLazyDecoding bool
}
// Unmarshal parses the wire-format message in b and places the result in m.
@@ -104,6 +109,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
if o.DiscardUnknown {
in.Flags |= protoiface.UnmarshalDiscardUnknown
}
+
+ if !allowPartial {
+		// This does not affect how current unmarshal functions work; it just allows them
+		// to record this for the lazy decoding case.
+ in.Flags |= protoiface.UnmarshalCheckRequired
+ }
+ if o.NoLazyDecoding {
+ in.Flags |= protoiface.UnmarshalNoLazyDecoding
+ }
+
out, err = methods.Unmarshal(in)
} else {
o.RecursionLimit--
@@ -156,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
var err error
if fd == nil {
err = errUnknown
- } else if flags.ProtoLegacy {
- if fd.IsWeak() && fd.Message().IsPlaceholder() {
- err = errUnknown // weak referent is not linked in
- }
}
// Parse the field value.
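
A usage sketch for the new NoLazyDecoding option added above; durationpb stands in only as a readily available generated message, since lazy decoding itself applies to Opaque API messages whose submessages are annotated with [lazy = true].

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	wire, err := proto.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	out := &durationpb.Duration{}
	// Force eager decoding even for lazily decodable submessages.
	opts := proto.UnmarshalOptions{NoLazyDecoding: true}
	if err := opts.Unmarshal(wire, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetSeconds()) // 90
}
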
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 1f847bcc3..f0473c586 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,7 +63,8 @@ type MarshalOptions struct {
// options (except for UseCachedSize itself).
//
// 2. The message and all its submessages have not changed in any
- // way since the Size call.
+ // way since the Size call. For lazily decoded messages, accessing
+ // a message results in decoding the message, which is a change.
//
// If either of these invariants is violated,
// the results are undefined and may include panics or corrupted output.
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b03..c36d4a9cd 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
+
+ // Only one of the messages needs to implement the fast-path for it to work.
+ pmx := protoMethods(mx)
+ pmy := protoMethods(my)
+ if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+ return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+ }
+
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index d248f2928..78445d116 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+// ... // make use of mm
+// }
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe5780..ef55b97dd 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@ func Clone(m Message) Message {
return dst.Interface()
}
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+ return Clone(m).(M)
+}
+
// mergeOptions provides a namespace for merge functions, and can be
// exported in the future if we add user-visible merge options.
type mergeOptions struct{}
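
A usage sketch contrasting the new generic proto.CloneOf with proto.Clone: CloneOf preserves the concrete message type, so no type assertion is needed. durationpb is used only as a readily available generated message.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	orig := durationpb.New(3 * time.Second)
	viaClone := proto.Clone(orig).(*durationpb.Duration) // assertion required
	viaCloneOf := proto.CloneOf(orig)                     // already *durationpb.Duration
	fmt.Println(proto.Equal(orig, viaClone), proto.Equal(orig, viaCloneOf)) // true true
}
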
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index 052fb5ae3..c8675806c 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,11 +12,19 @@ import (
)
// Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
func Size(m Message) int {
return MarshalOptions{}.Size(m)
}
// Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
func (o MarshalOptions) Size(m Message) int {
// Treat a nil message interface as an empty message; nothing to output.
if m == nil {
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
new file mode 100644
index 000000000..267fd0f1f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// ValueOrNil returns nil if has is false, or a pointer to a new variable
+// containing the value returned by the specified getter.
+//
+// This function is similar to the wrappers (proto.Int32(), proto.String(),
+// etc.), but is generic (works for any field type) and works with the hasser
+// and getter of a field, as opposed to a value.
+//
+// This is convenient when populating builder fields.
+//
+// Example:
+//
+// hop := attr.GetDirectHop()
+// injectedRoute := ripb.InjectedRoute_builder{
+// Prefixes: route.GetPrefixes(),
+// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
+// }
+func ValueOrNil[T any](has bool, getter func() T) *T {
+ if !has {
+ return nil
+ }
+ v := getter()
+ return &v
+}
+
+// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
+// it returns a pointer to an empty val message.
+//
+// This function allows for translating code from the old Open Struct API to the
+// new Opaque API.
+//
+// The old Open Struct API represented oneof fields with a wrapper struct:
+//
+// var signedImg *accountpb.SignedImage
+// profile := &accountpb.Profile{
+// // The Avatar oneof will be set, with an empty SignedImage.
+// Avatar: &accountpb.Profile_SignedImage{signedImg},
+// }
+//
+// The new Opaque API treats oneof fields like regular fields; there are no more
+// wrapper structs:
+//
+// var signedImg *accountpb.SignedImage
+// profile := &accountpb.Profile{}
+// profile.SetSignedImage(signedImg)
+//
+// For convenience, the Opaque API also offers Builders, which allow for a
+// direct translation of struct initialization. However, because Builders use
+// nilness to represent field presence (but there is no non-nil wrapper struct
+// anymore), Builders cannot distinguish between an unset oneof and a set oneof
+// with nil message. The above code would need to be translated with help of the
+// ValueOrDefault function to retain the same behavior:
+//
+// var signedImg *accountpb.SignedImage
+// return &accountpb.Profile_builder{
+// SignedImage: proto.ValueOrDefault(signedImg),
+// }.Build()
+func ValueOrDefault[T interface {
+ *P
+ Message
+}, P any](val T) T {
+ if val == nil {
+ return T(new(P))
+ }
+ return val
+}
+
+// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
+// type []byte.
+func ValueOrDefaultBytes(val []byte) []byte {
+ if val == nil {
+ return []byte{}
+ }
+ return val
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 8fbecb4f5..823dbf3ba 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,6 +13,8 @@
package protodesc
import (
+ "strings"
+
"google.golang.org/protobuf/internal/editionssupport"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/filedesc"
@@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
default:
return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
}
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
- return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
- }
f.L1.Path = fd.GetName()
if f.L1.Path == "" {
return nil, errors.New("file path must be populated")
}
+ if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+ // Allow cmd/protoc-gen-go/testdata to use any edition for easier
+ // testing of upcoming edition features.
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
+ return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+ }
+ }
f.L1.Package = protoreflect.FullName(fd.GetPackage())
if !f.L1.Package.IsValid() && f.L1.Package != "" {
return nil, errors.New("invalid package: %q", f.L1.Package)
@@ -126,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
}
f.L2.Imports[i].IsPublic = true
}
- for _, i := range fd.GetWeakDependency() {
- if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
- return nil, errors.New("invalid or duplicate weak import index: %d", i)
- }
- f.L2.Imports[i].IsWeak = true
- }
imps := importSet{f.Path(): true}
for i, path := range fd.GetDependency() {
imp := &f.L2.Imports[i]
f, err := r.FindFileByPath(path)
- if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+ if err == protoregistry.NotFound && o.AllowUnresolvable {
f = filedesc.PlaceholderFile(path)
} else if err != nil {
return nil, errors.New("could not resolve import %q: %v", path, err)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 856175542..9da34998b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -149,7 +149,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
if opts := fd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
- f.L1.IsWeak = opts.GetWeak()
+ f.L1.IsLazy = opts.GetLazy()
if opts.Packed != nil {
f.L1.EditionFeatures.IsPacked = opts.GetPacked()
}
@@ -214,6 +214,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
if xd.JsonName != nil {
x.L2.StringName.InitJSON(xd.GetJsonName())
}
+ if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+ x.L1.Kind = protoreflect.GroupKind
+ }
}
return xs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index f3cebab29..ff692436e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
o.L1.Fields.List = append(o.L1.Fields.List, f)
}
- if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+ if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil {
return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
}
if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
@@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
for i, xd := range xds {
x := &xs[i]
- if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+ if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil {
return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
}
- if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+ if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
}
if xd.DefaultValue != nil {
@@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
s := &ss[i]
for j, md := range sd.GetMethod() {
m := &s.L2.Methods.List[j]
- m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+ m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
if err != nil {
return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
}
- m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+ m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
if err != nil {
return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
}
@@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
// findTarget finds an enum or message descriptor if k is an enum, message,
// group, or unknown. If unknown, and the name could be resolved, the kind
// returned kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
switch k {
case protoreflect.EnumKind:
- ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+ ed, err := r.findEnumDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
return k, ed, nil, nil
case protoreflect.MessageKind, protoreflect.GroupKind:
- md, err := r.findMessageDescriptor(scope, ref, isWeak)
+ md, err := r.findMessageDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
@@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName,
// Handle unspecified kinds (possible with parsers that operate
// on a per-file basis without knowledge of dependencies).
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return 0, nil, nil, errors.New("%q not found", ref.FullName())
@@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName)
}
}
-func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderEnum(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
@@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa
return ed, nil
}
-func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index 6de31c2eb..c343d9227 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
}
}
- if f.IsWeak() && !flags.ProtoLegacy {
- return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
- }
- if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
- return errors.New("message field %q may only be weak for an optional message", f.FullName())
- }
if f.IsPacked() && !isPackable(f) {
return errors.New("message field %q is not packable", f.FullName())
}
@@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
if f.Cardinality() != protoreflect.Optional {
return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
}
- if f.IsWeak() {
- return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
- }
}
}
@@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd
return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
}
}
- if xd.GetOptions().GetWeak() {
- return errors.New("extension field %q cannot be a weak reference", x.FullName())
- }
if x.IsPacked() && !isPackable(x) {
return errors.New("extension field %q is not packable", x.FullName())
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 804830eda..697a61b29 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -11,10 +11,11 @@ import (
"google.golang.org/protobuf/internal/editiondefaults"
"google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
- gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+ "google.golang.org/protobuf/types/gofeaturespb"
)
var defaults = &descriptorpb.FeatureSetDefaults{}
@@ -43,6 +44,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
return descriptorpb.Edition_EDITION_PROTO3
case filedesc.Edition2023:
return descriptorpb.Edition_EDITION_2023
+ case filedesc.Edition2024:
+ return descriptorpb.Edition_EDITION_2024
default:
panic(fmt.Sprintf("unknown value for edition: %v", ed))
}
@@ -123,10 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
}
- if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
- if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
- parentFS.GenerateLegacyUnmarshalJSON = *luje
- }
+ // We must not use proto.GetExtension(child, gofeaturespb.E_Go)
+ // because that only works for messages we generated, but not for
+ // dynamicpb messages. See golang/protobuf#1669.
+ //
+ // Further, we harden this code against adversarial inputs: a
+ // service which accepts descriptors from a possibly malicious
+ // source shouldn't crash.
+ goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
+ if !goFeatures.IsValid() {
+ return parentFS
+ }
+ gf, ok := goFeatures.Interface().(protoreflect.Message)
+ if !ok {
+ return parentFS
+ }
+ // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
+ fields := gf.Descriptor().Fields()
+
+ if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.BoolKind &&
+ gf.Has(fd) {
+ parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.APILevel = int(gf.Get(fd).Enum())
}
return parentFS
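
The comment in the new code explains the change: the lookup must go through protoreflect rather than proto.GetExtension so it also works for dynamicpb-built descriptors, and it must not crash on malformed input. A rough standalone sketch of the same defensive pattern (the helper name and the main wrapper are illustrative only):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/descriptorpb"
        "google.golang.org/protobuf/types/gofeaturespb"
    )

    // legacyUnmarshalJSONEnum reads the (pb.go).legacy_unmarshal_json_enum feature
    // off a FeatureSet without type-asserting to *gofeaturespb.GoFeatures, so the
    // same code also works when the extension value is a dynamicpb message.
    func legacyUnmarshalJSONEnum(fs *descriptorpb.FeatureSet) (value, ok bool) {
        v := fs.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
        if !v.IsValid() {
            return false, false
        }
        m, isMsg := v.Interface().(protoreflect.Message)
        if !isMsg {
            return false, false
        }
        // Field 1 of GoFeatures is legacy_unmarshal_json_enum; check its shape
        // before reading, mirroring the hardening in mergeEditionFeatures above.
        fd := m.Descriptor().Fields().ByNumber(1)
        if fd == nil || fd.IsList() || fd.Kind() != protoreflect.BoolKind || !m.Has(fd) {
            return false, false
        }
        return m.Get(fd).Bool(), true
    }

    func main() {
        fs := &descriptorpb.FeatureSet{}
        proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
            LegacyUnmarshalJsonEnum: proto.Bool(true),
        })
        fmt.Println(legacyUnmarshalJSONEnum(fs)) // true true
    }
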
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a5de8d400..9b880aa8c 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
if imp.IsPublic {
p.PublicDependency = append(p.PublicDependency, int32(i))
}
- if imp.IsWeak {
- p.WeakDependency = append(p.WeakDependency, int32(i))
- }
}
for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
loc := locs.Get(i)
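
ToFileDescriptorProto is the reverse conversion, from a live descriptor back to its proto form; after this hunk it no longer populates weak_dependency. A small usage sketch against a well-known compiled-in file (the printed values shown as comments are what is expected, not output captured from this change):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protodesc"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // Round-trip a compiled-in descriptor; with the hunk above applied,
        // the returned FileDescriptorProto's weak_dependency list stays empty.
        fdp := protodesc.ToFileDescriptorProto(timestamppb.File_google_protobuf_timestamp_proto)
        fmt.Println(fdp.GetName())                 // google/protobuf/timestamp.proto
        fmt.Println(len(fdp.GetWeakDependency()))  // 0
    }
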
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6eb..742cb518c 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
Unmarshal func(unmarshalInput) (unmarshalOutput, error)
Merge func(mergeInput) mergeOutput
CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ Equal func(equalInput) equalOutput
}
supportFlags = uint64
sizeInput = struct {
@@ -75,4 +76,13 @@ type (
checkInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+ equalInput = struct {
+ pragma.NoUnkeyedLiterals
+ MessageA Message
+ MessageB Message
+ }
+ equalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Equal bool
+ }
)
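
The new Equal entry in the support-methods struct gives message implementations an equality fast path; ordinary callers keep using proto.Equal, which can pick that path up when it is present. A minimal usage sketch:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        a := wrapperspb.String("hello")
        b := wrapperspb.String("hello")
        c := wrapperspb.String("world")

        // proto.Equal compares messages semantically; with this protobuf-go
        // release it may dispatch to the message's Equal fast path when one is
        // advertised via ProtoMethods, falling back to reflection otherwise.
        fmt.Println(proto.Equal(a, b)) // true
        fmt.Println(proto.Equal(a, c)) // false
    }
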
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154eec4..a4a0a2971 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -398,6 +398,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
b = p.appendSingularField(b, "message_encoding", nil)
case 6:
b = p.appendSingularField(b, "json_format", nil)
+ case 7:
+ b = p.appendSingularField(b, "enforce_naming_style", nil)
}
return b
}
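
Case 7 extends SourcePath rendering to the new enforce_naming_style feature. A quick sketch of what that formatting produces for a file-level option, using field numbers from descriptor.proto (options = 8 on FileDescriptorProto, features = 50 on FileOptions, enforce_naming_style = 7 on FeatureSet):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        // A source path pointing at file options -> features -> enforce_naming_style.
        path := protoreflect.SourcePath{8, 50, 7}
        fmt.Println(path.String()) // .options.features.enforce_naming_style
    }
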
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index cd8fadbaf..cd7fbc87a 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -68,7 +68,7 @@ type Descriptor interface {
// dependency is not resolved, in which case only name information is known.
//
// Placeholder types may only be returned by the following accessors
- // as a result of unresolved dependencies or weak imports:
+ // as a result of unresolved dependencies:
//
// ╔═══════════════════════════════════╤═════════════════════╗
// ║ Accessor │ Descriptor ║
@@ -168,11 +168,7 @@ type FileImport struct {
// The current file and the imported file must be within proto package.
IsPublic bool
- // IsWeak reports whether this is a weak import, which does not impose
- // a direct dependency on the target file.
- //
- // Weak imports are a legacy proto1 feature. Equivalent behavior is
- // achieved using proto2 extension fields or proto3 Any messages.
+ // Deprecated: support for weak fields has been removed.
IsWeak bool
}
@@ -325,9 +321,7 @@ type FieldDescriptor interface {
// specified in the source .proto file.
HasOptionalKeyword() bool
- // IsWeak reports whether this is a weak field, which does not impose a
- // direct dependency on the target type.
- // If true, then Message returns a placeholder type.
+ // Deprecated: support for weak fields has been removed.
IsWeak() bool
// IsPacked reports whether repeated primitive numeric kinds should be
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06ff..a4b78acef 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@ type Message interface {
// This method may return nil.
//
// The returned methods type is identical to
- // google.golang.org/protobuf/runtime/protoiface.Methods.
+ // [google.golang.org/protobuf/runtime/protoiface.Methods].
// Consult the protoiface package documentation for details.
ProtoMethods() *methods
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2af..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
- nilType valueType = iota
- boolType
- int32Type
- int64Type
- uint32Type
- uint64Type
- float32Type
- float64Type
- stringType
- bytesType
- enumType
- ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
- pragma.DoNotCompare // 0B
-
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface any // 16B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
- return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
- return v.str
-}
-func (v Value) getBytes() []byte {
- return v.bin
-}
-func (v Value) getIface() any {
- return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index f7d386990..fe17f3722 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
package protoreflect
import (
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 7f3583ead..000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
-
-package protoreflect
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/internal/pragma"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
- ifaceHeader struct {
- Type unsafe.Pointer
- Data unsafe.Pointer
- }
-)
-
-var (
- nilType = typeOf(nil)
- boolType = typeOf(*new(bool))
- int32Type = typeOf(*new(int32))
- int64Type = typeOf(*new(int64))
- uint32Type = typeOf(*new(uint32))
- uint64Type = typeOf(*new(uint64))
- float32Type = typeOf(*new(float32))
- float64Type = typeOf(*new(float64))
- stringType = typeOf(*new(string))
- bytesType = typeOf(*new([]byte))
- enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
- return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
- pragma.DoNotCompare // 0B
-
- // typ stores the type of the value as a pointer to the Go type.
- typ unsafe.Pointer // 8B
-
- // ptr stores the data pointer for a String, Bytes, or interface value.
- ptr unsafe.Pointer // 8B
-
- // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
- // Enum value as a raw uint64.
- //
- // It is also used to store the length of a String or Bytes value;
- // the capacity is ignored.
- num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
- p := (*stringHeader)(unsafe.Pointer(&v))
- return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
- p := (*sliceHeader)(unsafe.Pointer(&v))
- return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
- p := (*ifaceHeader)(unsafe.Pointer(&v))
- return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
- *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
- return x
-}
-func (v Value) getBytes() (x []byte) {
- *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
- return x
-}
-func (v Value) getIface() (x any) {
- *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
- return x
-}
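
With the purego/appengine and pre-Go 1.21 variants deleted, protoreflect.Value is backed by the single unsafe-based representation renamed above; the public API is unchanged. A tiny sketch of that unchanged surface:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        s := protoreflect.ValueOfString("hello")
        b := protoreflect.ValueOfBytes([]byte{0x01, 0x02, 0x03})

        // The accessors behave identically regardless of which build-specific
        // backing implementation used to be selected by build tags.
        fmt.Println(s.String())               // hello
        fmt.Println(len(b.Bytes()))           // 3
        fmt.Println(s.IsValid(), b.IsValid()) // true true
    }
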
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d8..28e9e9f03 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
// CheckInitialized returns an error if any required fields in the message are not set.
CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+ // Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+ Equal func(EqualInput) EqualOutput
}
// SupportFlags indicate support for optional features.
@@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8
const (
UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+
+ // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
+ // The unmarshaller must not modify the contents of the buffer.
+ UnmarshalAliasBuffer
+
+ // UnmarshalValidated indicates that validation has already been
+ // performed on the input buffer.
+ UnmarshalValidated
+
+ // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are
+ // initialized.
+ UnmarshalCheckRequired
+
+ // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
+ // lazy decoding, even when otherwise available.
+ UnmarshalNoLazyDecoding
)
// UnmarshalOutputFlags are output from the Unmarshal method.
@@ -166,3 +185,18 @@ type CheckInitializedInput = struct {
type CheckInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ MessageA protoreflect.Message
+ MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Equal bool
+}
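
EqualInput/EqualOutput are the exported shapes behind the new fast path, and the extra UnmarshalInputFlags are consumed by the runtime (for example by lazy decoding). A rough sketch of how low-level code could probe and invoke the fast path; normal application code should simply call proto.Equal:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/runtime/protoiface"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        a := wrapperspb.String("hello")
        b := wrapperspb.String("hello")

        // ProtoMethods may return nil (e.g. for purely reflective messages),
        // and Equal is optional even when it does not, so check both.
        if m := a.ProtoReflect().ProtoMethods(); m != nil && m.Equal != nil {
            out := m.Equal(protoiface.EqualInput{
                MessageA: a.ProtoReflect(),
                MessageB: b.ProtoReflect(),
            })
            fmt.Println("fast path:", out.Equal)
        } else {
            fmt.Println("fallback:", proto.Equal(a, b))
        }
    }
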
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 4a1ab7fb3..93df1b569 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,6 +15,7 @@ import (
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/filetype"
"google.golang.org/protobuf/internal/impl"
+ "google.golang.org/protobuf/internal/protolazy"
)
// UnsafeEnabled specifies whether package unsafe can be used.
@@ -39,6 +40,9 @@ type (
ExtensionFieldV1 = impl.ExtensionField
Pointer = impl.Pointer
+
+ LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo
+ RaceDetectHookData = impl.RaceDetectHookData
)
var X impl.Export
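
The largest chunk below is the regenerated descriptor.pb.go: structs move to the open.v1 layout (unknownFields/sizeCache trail the data fields), the protoimpl.UnsafeEnabled branches disappear from Reset/ProtoReflect, and FeatureSet gains feature 7, enforce_naming_style, with a matching enum. A small sketch of setting the new feature; the getter name assumes the usual generated-code convention, since it is not shown in this excerpt:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        fs := &descriptorpb.FeatureSet{
            EnforceNamingStyle: descriptorpb.FeatureSet_STYLE_LEGACY.Enum(),
        }
        // The generated getter returns ENFORCE_NAMING_STYLE_UNKNOWN when unset.
        fmt.Println(fs.GetEnforceNamingStyle()) // STYLE_LEGACY
    }
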
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 9403eb075..7fe280f19 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -46,6 +46,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// The full set of known editions.
@@ -69,7 +70,7 @@ const (
Edition_EDITION_2023 Edition = 1000
Edition_EDITION_2024 Edition = 1001
// Placeholder editions for testing feature resolution. These should not be
- // used or relyed on outside of tests.
+ // used or relied on outside of tests.
Edition_EDITION_1_TEST_ONLY Edition = 1
Edition_EDITION_2_TEST_ONLY Edition = 2
Edition_EDITION_99997_TEST_ONLY Edition = 99997
@@ -577,8 +578,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
}
// If set to RETENTION_SOURCE, the option will be omitted from the binary.
-// Note: as of January 2023, support for this is in progress and does not yet
-// have an effect (b/264593489).
type FieldOptions_OptionRetention int32
const (
@@ -640,8 +639,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) {
// This indicates the types of entities that the field may apply to when used
// as an option. If it is unset, then the field may be freely used as an
-// option on any kind of entity. Note: as of January 2023, support for this is
-// in progress and does not yet have an effect (b/264593489).
+// option on any kind of entity.
type FieldOptions_OptionTargetType int32
const (
@@ -1141,6 +1139,65 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
}
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+ FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1
+ FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+ FeatureSet_EnforceNamingStyle_name = map[int32]string{
+ 0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+ 1: "STYLE2024",
+ 2: "STYLE_LEGACY",
+ }
+ FeatureSet_EnforceNamingStyle_value = map[string]int32{
+ "ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+ "STYLE2024": 1,
+ "STYLE_LEGACY": 2,
+ }
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+ p := new(FeatureSet_EnforceNamingStyle)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[16]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_EnforceNamingStyle(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1179,11 +1236,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[16]
+ return &file_google_protobuf_descriptor_proto_enumTypes[17]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1208,20 +1265,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
type FileDescriptorSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FileDescriptorSet) Reset() {
*x = FileDescriptorSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorSet) String() string {
@@ -1232,7 +1287,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1256,12 +1311,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto {
// Describes a complete .proto file.
type FileDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree
- Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
// Indexes of the public imported files in the dependency list above.
@@ -1284,18 +1336,24 @@ type FileDescriptorProto struct {
// The supported values are "proto2", "proto3", and "editions".
//
// If `edition` is present, this value must be "editions".
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
// The edition of the proto file.
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
+ Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FileDescriptorProto) Reset() {
*x = FileDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorProto) String() string {
@@ -1306,7 +1364,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1414,10 +1472,7 @@ func (x *FileDescriptorProto) GetEdition() Edition {
// Describes a message type.
type DescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
@@ -1429,16 +1484,16 @@ type DescriptorProto struct {
ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto) Reset() {
*x = DescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto) String() string {
@@ -1449,7 +1504,7 @@ func (*DescriptorProto) ProtoMessage() {}
func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1535,11 +1590,7 @@ func (x *DescriptorProto) GetReservedName() []string {
}
type ExtensionRangeOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
// For external users: DO NOT USE. We are in the process of open sourcing
@@ -1551,7 +1602,10 @@ type ExtensionRangeOptions struct {
// The verification state of the range.
// TODO: flip the default to DECLARATION once all empty ranges
// are marked as UNVERIFIED.
- Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+ Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for ExtensionRangeOptions fields.
@@ -1561,11 +1615,9 @@ const (
func (x *ExtensionRangeOptions) Reset() {
*x = ExtensionRangeOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions) String() string {
@@ -1576,7 +1628,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1621,10 +1673,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica
// Describes a field within a message.
type FieldDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
@@ -1676,15 +1725,15 @@ type FieldDescriptorProto struct {
// Proto2 optional fields do not set this flag, because they already indicate
// optional with `LABEL_OPTIONAL`.
Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FieldDescriptorProto) Reset() {
*x = FieldDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldDescriptorProto) String() string {
@@ -1695,7 +1744,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1789,21 +1838,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool {
// Describes a oneof.
type OneofDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *OneofDescriptorProto) Reset() {
*x = OneofDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofDescriptorProto) String() string {
@@ -1814,7 +1860,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1845,10 +1891,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions {
// Describes an enum type.
type EnumDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
@@ -1858,16 +1901,16 @@ type EnumDescriptorProto struct {
ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *EnumDescriptorProto) Reset() {
*x = EnumDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto) String() string {
@@ -1878,7 +1921,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1930,22 +1973,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
// Describes a value within an enum.
type EnumValueDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
- Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *EnumValueDescriptorProto) Reset() {
*x = EnumValueDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueDescriptorProto) String() string {
@@ -1956,7 +1996,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1994,22 +2034,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
// Describes a service.
type ServiceDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
- Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ServiceDescriptorProto) Reset() {
*x = ServiceDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceDescriptorProto) String() string {
@@ -2020,7 +2057,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2058,11 +2095,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions {
// Describes a method of a service.
type MethodDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
@@ -2072,6 +2106,8 @@ type MethodDescriptorProto struct {
ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
// Identifies if server streams multiple server messages
ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MethodDescriptorProto fields.
@@ -2082,11 +2118,9 @@ const (
func (x *MethodDescriptorProto) Reset() {
*x = MethodDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodDescriptorProto) String() string {
@@ -2097,7 +2131,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2155,11 +2189,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool {
}
type FileOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Sets the Java package where classes generated from this .proto will be
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
@@ -2247,10 +2277,16 @@ type FileOptions struct {
// determining the ruby package.
RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for FileOptions fields.
@@ -2267,11 +2303,9 @@ const (
func (x *FileOptions) Reset() {
*x = FileOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileOptions) String() string {
@@ -2282,7 +2316,7 @@ func (*FileOptions) ProtoMessage() {}
func (x *FileOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2446,11 +2480,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type MessageOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Set true to use the old proto1 MessageSet wire format for extensions.
// This is provided for backwards-compatibility with the MessageSet wire
// format. You should not use this for any other reason: It's less
@@ -2520,9 +2550,15 @@ type MessageOptions struct {
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MessageOptions fields.
@@ -2534,11 +2570,9 @@ const (
func (x *MessageOptions) Reset() {
*x = MessageOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MessageOptions) String() string {
@@ -2549,7 +2583,7 @@ func (*MessageOptions) ProtoMessage() {}
func (x *MessageOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2615,17 +2649,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type FieldOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
// options below. This option is only implemented to support use of
// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
- // type "bytes" in the open source release -- sorry, we'll try to include
- // other types in a future version!
+ // type "bytes" in the open source release.
+ // TODO: make ctype actually deprecated.
Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
// The packed option can be enabled for repeated primitive fields to enable
// a more efficient representation on the wire. Rather than repeatedly
@@ -2688,10 +2719,16 @@ type FieldOptions struct {
Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for FieldOptions fields.
@@ -2707,11 +2744,9 @@ const (
func (x *FieldOptions) Reset() {
*x = FieldOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions) String() string {
@@ -2722,7 +2757,7 @@ func (*FieldOptions) ProtoMessage() {}
func (x *FieldOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2836,24 +2871,24 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type OneofOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *OneofOptions) Reset() {
*x = OneofOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofOptions) String() string {
@@ -2864,7 +2899,7 @@ func (*OneofOptions) ProtoMessage() {}
func (x *OneofOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2894,11 +2929,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type EnumOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Set this option to true to allow mapping different tag names to the same
// value.
AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
@@ -2917,9 +2948,15 @@ type EnumOptions struct {
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for EnumOptions fields.
@@ -2929,11 +2966,9 @@ const (
func (x *EnumOptions) Reset() {
*x = EnumOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumOptions) String() string {
@@ -2944,7 +2979,7 @@ func (*EnumOptions) ProtoMessage() {}
func (x *EnumOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2996,17 +3031,16 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type EnumValueOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Is this enum value deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
// Indicate that fields annotated with this enum value should not be printed
// out when using debug formats, e.g. when the field contains sensitive
@@ -3016,6 +3050,9 @@ type EnumValueOptions struct {
FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for EnumValueOptions fields.
@@ -3026,11 +3063,9 @@ const (
func (x *EnumValueOptions) Reset() {
*x = EnumValueOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueOptions) String() string {
@@ -3041,7 +3076,7 @@ func (*EnumValueOptions) ProtoMessage() {}
func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3092,12 +3127,11 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type ServiceOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
@@ -3106,6 +3140,9 @@ type ServiceOptions struct {
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for ServiceOptions fields.
@@ -3115,11 +3152,9 @@ const (
func (x *ServiceOptions) Reset() {
*x = ServiceOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceOptions) String() string {
@@ -3130,7 +3165,7 @@ func (*ServiceOptions) ProtoMessage() {}
func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3167,11 +3202,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
}
type MethodOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Is this method deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
@@ -3179,9 +3210,15 @@ type MethodOptions struct {
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MethodOptions fields.
@@ -3192,11 +3229,9 @@ const (
func (x *MethodOptions) Reset() {
*x = MethodOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodOptions) String() string {
@@ -3207,7 +3242,7 @@ func (*MethodOptions) ProtoMessage() {}
func (x *MethodOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3257,11 +3292,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
// in them.
type UninterpretedOption struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
@@ -3270,15 +3302,15 @@ type UninterpretedOption struct {
DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UninterpretedOption) Reset() {
*x = UninterpretedOption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption) String() string {
@@ -3289,7 +3321,7 @@ func (*UninterpretedOption) ProtoMessage() {}
func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3360,26 +3392,24 @@ func (x *UninterpretedOption) GetAggregateValue() string {
// be designed and implemented to handle this, hopefully before we ever hit a
// conflict here.
type FeatureSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+ EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSet) Reset() {
*x = FeatureSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSet) String() string {
@@ -3390,7 +3420,7 @@ func (*FeatureSet) ProtoMessage() {}
func (x *FeatureSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3447,15 +3477,19 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
return FeatureSet_JSON_FORMAT_UNKNOWN
}
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+ if x != nil && x.EnforceNamingStyle != nil {
+ return *x.EnforceNamingStyle
+ }
+ return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
// A compiled specification for the defaults of a set of features. These
// messages are generated from FeatureSet extensions and can be used to seed
// feature resolution. The resolution with this object becomes a simple search
// for the closest matching edition, followed by proto merges.
type FeatureSetDefaults struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
// The minimum supported edition (inclusive) when this was constructed.
// Editions before this will not have defaults.
@@ -3463,15 +3497,15 @@ type FeatureSetDefaults struct {
// The maximum known edition (inclusive) when this was constructed. Editions
// after this will not have reliable defaults.
MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSetDefaults) Reset() {
*x = FeatureSetDefaults{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults) String() string {
@@ -3482,7 +3516,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3521,10 +3555,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
type SourceCodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A Location identifies a piece of source code in a .proto file which
// corresponds to a particular definition. This information is intended
// to be useful to IDEs, code indexers, documentation generators, and similar
@@ -3573,16 +3604,17 @@ type SourceCodeInfo struct {
// - Code which tries to interpret locations should probably be designed to
// ignore those that it doesn't understand, as more types of locations could
// be recorded in the future.
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo) String() string {
@@ -3593,7 +3625,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3619,22 +3651,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
// file. A GeneratedCodeInfo message is associated with only one generated
// source file, but may contain references to different source .proto files.
type GeneratedCodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// An Annotation connects some span of text in generated code to an element
// of its generating .proto file.
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo) String() string {
@@ -3645,7 +3674,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3668,22 +3697,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
}
type DescriptorProto_ExtensionRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3694,7 +3720,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3734,21 +3760,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
type DescriptorProto_ReservedRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ReservedRange) String() string {
@@ -3759,7 +3782,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3789,10 +3812,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 {
}
type ExtensionRangeOptions_Declaration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The extension number declared within the extension range.
Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"`
// The fully-qualified name of the extension field. There must be a leading
@@ -3808,16 +3828,16 @@ type ExtensionRangeOptions_Declaration struct {
Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"`
// If true, indicates that the extension must be defined as repeated.
// Otherwise the extension must be defined as optional.
- Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+ Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3828,7 +3848,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3885,21 +3905,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool {
// is inclusive such that it can appropriately represent the entire int32
// domain.
type EnumDescriptorProto_EnumReservedRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive.
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive.
+ sizeCache protoimpl.SizeCache
}
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3910,7 +3927,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3940,21 +3957,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
}
type FieldOptions_EditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
+ sizeCache protoimpl.SizeCache
}
func (x *FieldOptions_EditionDefault) Reset() {
*x = FieldOptions_EditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_EditionDefault) String() string {
@@ -3965,7 +3979,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3996,10 +4010,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string {
// Information about the support window of a feature.
type FieldOptions_FeatureSupport struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The edition that this feature was first available in. In editions
// earlier than this one, the default assigned to EDITION_LEGACY will be
// used, and proto files will not be able to override it.
@@ -4014,15 +4025,15 @@ type FieldOptions_FeatureSupport struct {
// this one, the last default assigned will be used, and proto files will
// not be able to override it.
EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FieldOptions_FeatureSupport) Reset() {
*x = FieldOptions_FeatureSupport{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_FeatureSupport) String() string {
@@ -4033,7 +4044,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4082,21 +4093,18 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
// "foo.(bar.baz).moo".
type UninterpretedOption_NamePart struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
unknownFields protoimpl.UnknownFields
-
- NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
- IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption_NamePart) String() string {
@@ -4107,7 +4115,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4141,24 +4149,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
// the defaults at the closest matching edition ordered at or before it should
// be used. This field must be in strict ascending order by edition.
type FeatureSetDefaults_FeatureSetEditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
// Defaults of features that can be overridden in this edition.
OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
// Defaults of features that can't be overridden in this edition.
FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4169,7 +4174,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4206,10 +4211,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur
}
type SourceCodeInfo_Location struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Identifies which part of the FileDescriptorProto was defined at this
// location.
//
@@ -4301,15 +4303,15 @@ type SourceCodeInfo_Location struct {
LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo_Location) String() string {
@@ -4320,7 +4322,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4371,10 +4373,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
}
type GeneratedCodeInfo_Annotation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
@@ -4386,17 +4385,17 @@ type GeneratedCodeInfo_Annotation struct {
// Identifies the ending offset in bytes in the generated code that
// relates to the identified object. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
- Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4407,7 +4406,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4459,789 +4458,381 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
-var file_google_protobuf_descriptor_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
- 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
- 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
- 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
- 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
- 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
- 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
- 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
- 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
- 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
- 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
- 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
- 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
- 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
- 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
- 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
- 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
- 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
- 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
- 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88,
- 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c,
- 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
- 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a,
- 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c,
- 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,
- 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f,
- 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
- 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6,
- 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a,
- 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a,
- 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a,
- 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12,
- 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12,
- 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d,
- 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12,
- 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
- 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45,
- 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49,
- 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
- 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c,
- 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e,
- 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45,
- 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45,
- 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14,
- 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
- 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e,
- 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
- 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
- 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01,
- 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06,
- 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75,
- 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
- 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
- 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
- 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43,
- 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61,
- 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61,
- 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
- 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
- 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e,
- 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74,
- 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18,
- 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61,
- 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66,
- 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f,
- 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f,
- 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d,
- 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61,
- 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15,
- 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
- 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
- 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
- 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
- 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
- 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
- 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
- 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
- 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70,
- 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50,
- 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
- 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
- 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70,
- 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14,
- 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f,
- 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64,
- 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70,
- 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61,
- 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e,
- 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37,
- 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
- 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04,
- 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a,
- 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16,
- 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06,
- 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53,
- 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12,
- 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e,
- 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
- 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c,
- 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
- 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13,
- 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a,
- 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
- 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f,
- 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
- 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a,
- 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12,
- 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f,
- 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
- 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11,
- 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69,
- 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
- 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
- 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
- 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
- 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
- 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
- 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
- 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
- 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
- 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
- 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
- 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
- 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
- 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
- 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
- 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
- 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
- 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
- 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f,
- 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
- 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
- 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
- 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
- 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
- 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
- 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02,
- 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61,
- 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
- 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
- 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
- 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65,
- 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
- 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d,
- 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
- 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f,
- 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10,
- 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10,
- 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a,
- 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74,
- 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69,
- 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10,
- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
- 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65,
- 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21,
- 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
- 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61,
- 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a,
- 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
- 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61,
- 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74,
- 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73,
- 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01,
- 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
- 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c,
- 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
- 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09,
- 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75,
- 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01,
- 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01,
- 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07,
- 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72,
- 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74,
- 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42,
- 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
- 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50,
- 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15,
- 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98,
- 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6,
- 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2,
- 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01,
- 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47,
- 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01,
- 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
- 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01,
- 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53,
- 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05,
- 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a,
- 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46,
- 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49,
- 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49,
- 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45,
- 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f,
- 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10,
- 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45,
- 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43,
- 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45,
- 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66,
- 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55,
- 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49,
- 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04,
- 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41,
- 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f,
- 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45,
- 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f,
- 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f,
- 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
- 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c,
- 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52,
- 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e,
- 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07,
- 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f,
- 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
- 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d,
- 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69,
- 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f,
- 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61,
- 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66,
- 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a,
- 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce,
- 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70,
- 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70,
- 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c,
- 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
- 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65,
- 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64,
- 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
- 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44,
- 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22,
- 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64,
- 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67,
- 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08,
- 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61,
- 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07,
- 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53,
- 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13,
- 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c,
- 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
- 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
- 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
- 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
- 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54,
- 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54,
- 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
- 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f,
- 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13,
- 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa,
- 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-}
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+ "\n" +
+ " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+ "\x11FileDescriptorSet\x128\n" +
+ "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" +
+ "\x13FileDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+ "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+ "\n" +
+ "dependency\x18\x03 \x03(\tR\n" +
+ "dependency\x12+\n" +
+ "\x11public_dependency\x18\n" +
+ " \x03(\x05R\x10publicDependency\x12'\n" +
+ "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" +
+ "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+ "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+ "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+ "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+ "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+ "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+ "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+ "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" +
+ "\x0fDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+ "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+ "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+ "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+ "nestedType\x12A\n" +
+ "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+ "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+ "\n" +
+ "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+ "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+ "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\n" +
+ " \x03(\tR\freservedName\x1az\n" +
+ "\x0eExtensionRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+ "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+ "\rReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+ "\x15ExtensionRangeOptions\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+ "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+ "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+ "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+ "\vDeclaration\x12\x16\n" +
+ "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+ "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+ "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+ "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+ "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+ "\x11VerificationState\x12\x0f\n" +
+ "\vDECLARATION\x10\x00\x12\x0e\n" +
+ "\n" +
+ "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+ "\x14FieldDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+ "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+ "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+ "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+ "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+ "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+ "\voneof_index\x18\t \x01(\x05R\n" +
+ "oneofIndex\x12\x1b\n" +
+ "\tjson_name\x18\n" +
+ " \x01(\tR\bjsonName\x127\n" +
+ "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+ "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+ "\x04Type\x12\x0f\n" +
+ "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+ "\n" +
+ "TYPE_FLOAT\x10\x02\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT64\x10\x03\x12\x0f\n" +
+ "\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT32\x10\x05\x12\x10\n" +
+ "\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+ "\fTYPE_FIXED32\x10\a\x12\r\n" +
+ "\tTYPE_BOOL\x10\b\x12\x0f\n" +
+ "\vTYPE_STRING\x10\t\x12\x0e\n" +
+ "\n" +
+ "TYPE_GROUP\x10\n" +
+ "\x12\x10\n" +
+ "\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+ "\n" +
+ "TYPE_BYTES\x10\f\x12\x0f\n" +
+ "\vTYPE_UINT32\x10\r\x12\r\n" +
+ "\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+ "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+ "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+ "\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+ "\vTYPE_SINT64\x10\x12\"C\n" +
+ "\x05Label\x12\x12\n" +
+ "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+ "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+ "\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+ "\x14OneofDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+ "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" +
+ "\x13EnumDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+ "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+ "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" +
+ "\x11EnumReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+ "\x18EnumValueDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+ "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+ "\x16ServiceDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+ "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+ "\x15MethodDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+ "\n" +
+ "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+ "\voutput_type\x18\x03 \x01(\tR\n" +
+ "outputType\x128\n" +
+ "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+ "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+ "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+ "\vFileOptions\x12!\n" +
+ "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+ "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+ "\x13java_multiple_files\x18\n" +
+ " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+ "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+ "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+ "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+ "\n" +
+ "go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+ "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+ "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+ "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+ "\n" +
+ "deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+ "deprecated\x12.\n" +
+ "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+ "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+ "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+ "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+ "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+ "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+ "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+ "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+ "\fOptimizeMode\x12\t\n" +
+ "\x05SPEED\x10\x01\x12\r\n" +
+ "\tCODE_SIZE\x10\x02\x12\x10\n" +
+ "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+ "\x0eMessageOptions\x12<\n" +
+ "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+ "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x1b\n" +
+ "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+ "\"\x9d\r\n" +
+ "\fFieldOptions\x12A\n" +
+ "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+ "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+ "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+ "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+ "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x19\n" +
+ "\x04weak\x18\n" +
+ " \x01(\b:\x05falseR\x04weak\x12(\n" +
+ "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+ "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+ "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+ "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+ "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+ "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+ "\x0eEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+ "\x0eFeatureSupport\x12G\n" +
+ "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+ "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+ "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+ "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+ "\x05CType\x12\n" +
+ "\n" +
+ "\x06STRING\x10\x00\x12\b\n" +
+ "\x04CORD\x10\x01\x12\x10\n" +
+ "\fSTRING_PIECE\x10\x02\"5\n" +
+ "\x06JSType\x12\r\n" +
+ "\tJS_NORMAL\x10\x00\x12\r\n" +
+ "\tJS_STRING\x10\x01\x12\r\n" +
+ "\tJS_NUMBER\x10\x02\"U\n" +
+ "\x0fOptionRetention\x12\x15\n" +
+ "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+ "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+ "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+ "\x10OptionTargetType\x12\x17\n" +
+ "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+ "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+ "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+ "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+ "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+ "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+ "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+ "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+ "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+ "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+ "\fOneofOptions\x127\n" +
+ "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+ "\vEnumOptions\x12\x1f\n" +
+ "\vallow_alias\x18\x02 \x01(\bR\n" +
+ "allowAlias\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+ "\x10EnumValueOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+ "deprecated\x127\n" +
+ "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+ "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+ "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+ "\x0eServiceOptions\x127\n" +
+ "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+ "\rMethodOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12q\n" +
+ "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+ "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+ "\x10IdempotencyLevel\x12\x17\n" +
+ "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+ "\n" +
+ "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+ "\x13UninterpretedOption\x12A\n" +
+ "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+ "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+ "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+ "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+ "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+ "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+ "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+ "\bNamePart\x12\x1b\n" +
+ "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+ "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" +
+ "\n" +
+ "FeatureSet\x12\x91\x01\n" +
+ "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+ "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+ "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+ "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+ "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+ "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+ "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+ "jsonFormat\x12\xab\x01\n" +
+ "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" +
+ "\rFieldPresence\x12\x1a\n" +
+ "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+ "\bEXPLICIT\x10\x01\x12\f\n" +
+ "\bIMPLICIT\x10\x02\x12\x13\n" +
+ "\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+ "\bEnumType\x12\x15\n" +
+ "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+ "\x04OPEN\x10\x01\x12\n" +
+ "\n" +
+ "\x06CLOSED\x10\x02\"V\n" +
+ "\x15RepeatedFieldEncoding\x12#\n" +
+ "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06PACKED\x10\x01\x12\f\n" +
+ "\bEXPANDED\x10\x02\"I\n" +
+ "\x0eUtf8Validation\x12\x1b\n" +
+ "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06VERIFY\x10\x02\x12\b\n" +
+ "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+ "\x0fMessageEncoding\x12\x1c\n" +
+ "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+ "\tDELIMITED\x10\x02\"H\n" +
+ "\n" +
+ "JsonFormat\x12\x17\n" +
+ "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+ "\x05ALLOW\x10\x01\x12\x16\n" +
+ "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+ "\x12EnforceNamingStyle\x12 \n" +
+ "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+ "\tSTYLE2024\x10\x01\x12\x10\n" +
+ "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+ "\x12FeatureSetDefaults\x12X\n" +
+ "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+ "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+ "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+ "\x18FeatureSetEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+ "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+ "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+ "\x0eSourceCodeInfo\x12D\n" +
+ "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+ "\bLocation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+ "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+ "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+ "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+ "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+ "\x11GeneratedCodeInfo\x12M\n" +
+ "\n" +
+ "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+ "annotation\x1a\xeb\x01\n" +
+ "\n" +
+ "Annotation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+ "\vsource_file\x18\x02 \x01(\tR\n" +
+ "sourceFile\x12\x14\n" +
+ "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+ "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+ "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+ "\bSemantic\x12\b\n" +
+ "\x04NONE\x10\x00\x12\a\n" +
+ "\x03SET\x10\x01\x12\t\n" +
+ "\x05ALIAS\x10\x02*\xa7\x02\n" +
+ "\aEdition\x12\x13\n" +
+ "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+ "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+ "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+ "\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+ "\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+ "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+ "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+ "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+ "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" +
+ "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
var (
file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
- file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc
+ file_google_protobuf_descriptor_proto_rawDescData []byte
)
func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() {
- file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData)
+ file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)))
})
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18)
var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
var file_google_protobuf_descriptor_proto_goTypes = []any{
(Edition)(0), // 0: google.protobuf.Edition
@@ -5260,124 +4851,126 @@ var file_google_protobuf_descriptor_proto_goTypes = []any{
(FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
(FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
(FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 27: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
- (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
- (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
- (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
- (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport
- (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation
+ (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 28: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault
+ (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
- 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
- 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
- 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
- 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
- 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
- 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
- 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 77, // [77:77] is the sub-list for method output_type
- 77, // [77:77] is the sub-list for method input_type
- 77, // [77:77] is the sub-list for extension type_name
- 77, // [77:77] is the sub-list for extension extendee
- 0, // [0:77] is the sub-list for field type_name
+ 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+ 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+ 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+ 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+ 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+ 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+ 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 78, // [78:78] is the sub-list for method output_type
+ 78, // [78:78] is the sub-list for method input_type
+ 78, // [78:78] is the sub-list for extension type_name
+ 78, // [78:78] is the sub-list for extension extendee
+ 0, // [0:78] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5385,430 +4978,12 @@ func file_google_protobuf_descriptor_proto_init() {
if File_google_protobuf_descriptor_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*FieldDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*OneofDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*MethodDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*FileOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*MessageOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*OneofOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*EnumOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*MethodOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ExtensionRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions_Declaration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_EditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_FeatureSupport); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption_NamePart); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo_Location); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo_Annotation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 17,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
+ NumEnums: 18,
NumMessages: 33,
NumExtensions: 0,
NumServices: 0,
@@ -5819,7 +4994,6 @@ func file_google_protobuf_descriptor_proto_init() {
MessageInfos: file_google_protobuf_descriptor_proto_msgTypes,
}.Build()
File_google_protobuf_descriptor_proto = out.File
- file_google_protobuf_descriptor_proto_rawDesc = nil
file_google_protobuf_descriptor_proto_goTypes = nil
file_google_protobuf_descriptor_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index a2ca940c5..37e712b6b 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -16,24 +16,153 @@ import (
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
-type GoFeatures struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+type GoFeatures_APILevel int32
+
+const (
+ // API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
+ // but needs to be a separate value to distinguish between
+ // an explicitly set api level or a missing api level.
+ GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
+ GoFeatures_API_OPEN GoFeatures_APILevel = 1
+ GoFeatures_API_HYBRID GoFeatures_APILevel = 2
+ GoFeatures_API_OPAQUE GoFeatures_APILevel = 3
+)
+
+// Enum value maps for GoFeatures_APILevel.
+var (
+ GoFeatures_APILevel_name = map[int32]string{
+ 0: "API_LEVEL_UNSPECIFIED",
+ 1: "API_OPEN",
+ 2: "API_HYBRID",
+ 3: "API_OPAQUE",
+ }
+ GoFeatures_APILevel_value = map[string]int32{
+ "API_LEVEL_UNSPECIFIED": 0,
+ "API_OPEN": 1,
+ "API_HYBRID": 2,
+ "API_OPAQUE": 3,
+ }
+)
+
+func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
+ p := new(GoFeatures_APILevel)
+ *p = x
+ return p
+}
+
+func (x GoFeatures_APILevel) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
+}
+
+func (GoFeatures_APILevel) Type() protoreflect.EnumType {
+ return &file_google_protobuf_go_features_proto_enumTypes[0]
+}
+
+func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = GoFeatures_APILevel(num)
+ return nil
+}
+
+// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
+}
+type GoFeatures_StripEnumPrefix int32
+
+const (
+ GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0
+ GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1
+ GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
+ GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3
+)
+
+// Enum value maps for GoFeatures_StripEnumPrefix.
+var (
+ GoFeatures_StripEnumPrefix_name = map[int32]string{
+ 0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
+ 1: "STRIP_ENUM_PREFIX_KEEP",
+ 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
+ 3: "STRIP_ENUM_PREFIX_STRIP",
+ }
+ GoFeatures_StripEnumPrefix_value = map[string]int32{
+ "STRIP_ENUM_PREFIX_UNSPECIFIED": 0,
+ "STRIP_ENUM_PREFIX_KEEP": 1,
+ "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
+ "STRIP_ENUM_PREFIX_STRIP": 3,
+ }
+)
+
+func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
+ p := new(GoFeatures_StripEnumPrefix)
+ *p = x
+ return p
+}
+
+func (x GoFeatures_StripEnumPrefix) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
+}
+
+func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
+ return &file_google_protobuf_go_features_proto_enumTypes[1]
+}
+
+func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = GoFeatures_StripEnumPrefix(num)
+ return nil
+}
+
+// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
+func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type GoFeatures struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Whether or not to generate the deprecated UnmarshalJSON method for enums.
+ // Can only be true for proto using the Open Struct api.
LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
+ // One of OPEN, HYBRID or OPAQUE.
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
+ StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GoFeatures) Reset() {
*x = GoFeatures{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GoFeatures) String() string {
@@ -44,7 +173,7 @@ func (*GoFeatures) ProtoMessage() {}
func (x *GoFeatures) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -66,6 +195,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
return false
}
+func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
+ if x != nil && x.ApiLevel != nil {
+ return *x.ApiLevel
+ }
+ return GoFeatures_API_LEVEL_UNSPECIFIED
+}
+
+func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
+ if x != nil && x.StripEnumPrefix != nil {
+ return *x.StripEnumPrefix
+ }
+ return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
+}
+
var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FeatureSet)(nil),
@@ -85,59 +228,60 @@ var (
var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
-var file_google_protobuf_go_features_proto_rawDesc = []byte{
- 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
- 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
- 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
- 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
- 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
- 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
- 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
- 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
- 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
- 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
- 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12,
- 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
-}
+const file_google_protobuf_go_features_proto_rawDesc = "" +
+ "\n" +
+ "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
+ "\n" +
+ "GoFeatures\x12\xbe\x01\n" +
+ "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
+ "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
+ "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
+ "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
+ "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
+ "\bAPILevel\x12\x19\n" +
+ "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
+ "\bAPI_OPEN\x10\x01\x12\x0e\n" +
+ "\n" +
+ "API_HYBRID\x10\x02\x12\x0e\n" +
+ "\n" +
+ "API_OPAQUE\x10\x03\"\x92\x01\n" +
+ "\x0fStripEnumPrefix\x12!\n" +
+ "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
+ "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
+ "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
+ "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
+ "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
var (
file_google_protobuf_go_features_proto_rawDescOnce sync.Once
- file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc
+ file_google_protobuf_go_features_proto_rawDescData []byte
)
func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
- file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData)
+ file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)))
})
return file_google_protobuf_go_features_proto_rawDescData
}
+var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_protobuf_go_features_proto_goTypes = []any{
- (*GoFeatures)(nil), // 0: pb.GoFeatures
- (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
+ (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel
+ (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
+ (*GoFeatures)(nil), // 2: pb.GoFeatures
+ (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
}
var file_google_protobuf_go_features_proto_depIdxs = []int32{
- 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet
- 0, // 1: pb.go:type_name -> pb.GoFeatures
- 2, // [2:2] is the sub-list for method output_type
- 2, // [2:2] is the sub-list for method input_type
- 1, // [1:2] is the sub-list for extension type_name
- 0, // [0:1] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
+ 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
+ 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
+ 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
+ 2, // 3: pb.go:type_name -> pb.GoFeatures
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 3, // [3:4] is the sub-list for extension type_name
+ 2, // [2:3] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
}
func init() { file_google_protobuf_go_features_proto_init() }
@@ -145,37 +289,23 @@ func file_google_protobuf_go_features_proto_init() {
if File_google_protobuf_go_features_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*GoFeatures); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
- NumEnums: 0,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)),
+ NumEnums: 2,
NumMessages: 1,
NumExtensions: 1,
NumServices: 0,
},
GoTypes: file_google_protobuf_go_features_proto_goTypes,
DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
+ EnumInfos: file_google_protobuf_go_features_proto_enumTypes,
MessageInfos: file_google_protobuf_go_features_proto_msgTypes,
ExtensionInfos: file_google_protobuf_go_features_proto_extTypes,
}.Build()
File_google_protobuf_go_features_proto = out.File
- file_google_protobuf_go_features_proto_rawDesc = nil
file_google_protobuf_go_features_proto_goTypes = nil
file_google_protobuf_go_features_proto_depIdxs = nil
}
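The recurring change in these regenerated files replaces the `[]byte` raw descriptor with a string constant and passes it to the runtime through unsafe.Slice(unsafe.StringData(...)), which views the constant's bytes without copying. A minimal stand-alone sketch of that conversion, not part of this diff; rawDesc is a made-up stand-in and Go 1.20+ is assumed for unsafe.StringData:

package main

import (
	"fmt"
	"unsafe"
)

// rawDesc stands in for one of the generated file_*_rawDesc constants,
// which are much longer in the real files.
const rawDesc = "\n\x19google/protobuf/any.proto"

// bytesOf returns a []byte view of s without copying. The result aliases
// the string's backing storage and must be treated as read-only.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := bytesOf(rawDesc)
	fmt.Println(len(b), b[0] == '\n') // 27 true
}

Because the descriptor now lives in a string constant, the generated init code no longer nils it out after building the file descriptor, which is why the `rawDesc = nil` lines disappear in the hunks above.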
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 7172b43d3..1ff0d1494 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -122,6 +122,7 @@ import (
reflect "reflect"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -210,10 +211,7 @@ import (
// "value": "1.212s"
// }
type Any struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
@@ -244,7 +242,9 @@ type Any struct {
// used with implementation specific semantics.
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New marshals src into a new Any instance.
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_any_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {}
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_any_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -414,32 +412,22 @@ func (x *Any) GetValue() []byte {
var File_google_protobuf_any_proto protoreflect.FileDescriptor
-var file_google_protobuf_any_proto_rawDesc = []byte{
- 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
- 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
- 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
- 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_any_proto_rawDesc = "" +
+ "\n" +
+ "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+ "\x03Any\x12\x19\n" +
+ "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+ "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_any_proto_rawDescOnce sync.Once
- file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+ file_google_protobuf_any_proto_rawDescData []byte
)
func file_google_protobuf_any_proto_rawDescGZIP() []byte {
file_google_protobuf_any_proto_rawDescOnce.Do(func() {
- file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)))
})
return file_google_protobuf_any_proto_rawDescData
}
@@ -461,25 +449,11 @@ func file_google_protobuf_any_proto_init() {
if File_google_protobuf_any_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -490,7 +464,6 @@ func file_google_protobuf_any_proto_init() {
MessageInfos: file_google_protobuf_any_proto_msgTypes,
}.Build()
File_google_protobuf_any_proto = out.File
- file_google_protobuf_any_proto_rawDesc = nil
file_google_protobuf_any_proto_goTypes = nil
file_google_protobuf_any_proto_depIdxs = nil
}
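For context, a minimal usage sketch of the anypb API whose generated code changes above: packing a message into an Any and unpacking it again. The sketch is not part of this diff; the payload type and value are illustrative only.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Pack a concrete message into an Any.
	payload := wrapperspb.String("hello")
	a, err := anypb.New(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.GetTypeUrl()) // type.googleapis.com/google.protobuf.StringValue

	// Unpack it back into a typed message.
	var out wrapperspb.StringValue
	if err := a.UnmarshalTo(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // hello
}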
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 1b71bcd91..ca2e7b38f 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -80,6 +80,7 @@ import (
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Duration represents a signed, fixed-length span of time represented
@@ -141,10 +142,7 @@ import (
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
type Duration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
@@ -155,7 +153,9 @@ type Duration struct {
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New constructs a new Duration from the provided time.Duration.
@@ -245,11 +245,9 @@ func (x *Duration) check() uint {
func (x *Duration) Reset() {
*x = Duration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Duration) String() string {
@@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {}
func (x *Duration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_duration_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -291,33 +289,22 @@ func (x *Duration) GetNanos() int32 {
var File_google_protobuf_duration_proto protoreflect.FileDescriptor
-var file_google_protobuf_duration_proto_rawDesc = []byte{
- 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
- 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
- 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
- 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
- 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_duration_proto_rawDesc = "" +
+ "\n" +
+ "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
+ "\bDuration\x12\x18\n" +
+ "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+ "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
+ "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_duration_proto_rawDescOnce sync.Once
- file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+ file_google_protobuf_duration_proto_rawDescData []byte
)
func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
- file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+ file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)))
})
return file_google_protobuf_duration_proto_rawDescData
}
@@ -339,25 +326,11 @@ func file_google_protobuf_duration_proto_init() {
if File_google_protobuf_duration_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Duration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -368,7 +341,6 @@ func file_google_protobuf_duration_proto_init() {
MessageInfos: file_google_protobuf_duration_proto_msgTypes,
}.Build()
File_google_protobuf_duration_proto = out.File
- file_google_protobuf_duration_proto_rawDesc = nil
file_google_protobuf_duration_proto_goTypes = nil
file_google_protobuf_duration_proto_depIdxs = nil
}
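A short usage sketch of the durationpb helpers regenerated above; the values are illustrative, while New, CheckValid, and AsDuration are the package's documented API.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(1500 * time.Millisecond)
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 1 500000000

	// Round-trip back to time.Duration after validating the range.
	if err := d.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1.5s
}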
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index d87b4fb82..1d7ee3b47 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -38,6 +38,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// A generic empty message that you can re-use to avoid defining duplicated
@@ -48,18 +49,16 @@ import (
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
type Empty struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Empty) Reset() {
*x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_empty_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Empty) String() string {
@@ -70,7 +69,7 @@ func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -87,29 +86,21 @@ func (*Empty) Descriptor() ([]byte, []int) {
var File_google_protobuf_empty_proto protoreflect.FileDescriptor
-var file_google_protobuf_empty_proto_rawDesc = []byte{
- 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
- 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
- 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
- 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
- 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
- 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_empty_proto_rawDesc = "" +
+ "\n" +
+ "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
+ "\x05EmptyB}\n" +
+ "\x13com.google.protobufB\n" +
+ "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_empty_proto_rawDescOnce sync.Once
- file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
+ file_google_protobuf_empty_proto_rawDescData []byte
)
func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
- file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
+ file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)))
})
return file_google_protobuf_empty_proto_rawDescData
}
@@ -131,25 +122,11 @@ func file_google_protobuf_empty_proto_init() {
if File_google_protobuf_empty_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -160,7 +137,6 @@ func file_google_protobuf_empty_proto_init() {
MessageInfos: file_google_protobuf_empty_proto_msgTypes,
}.Build()
File_google_protobuf_empty_proto = out.File
- file_google_protobuf_empty_proto_rawDesc = nil
file_google_protobuf_empty_proto_goTypes = nil
file_google_protobuf_empty_proto_depIdxs = nil
}
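For completeness, a tiny sketch (not part of this change) showing that google.protobuf.Empty carries no fields and therefore marshals to zero bytes:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Empty has no fields, so its wire encoding is empty.
	b, err := proto.Marshal(&emptypb.Empty{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 0
}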
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index ac1e91bb6..91ee89a5c 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -83,6 +83,7 @@ import (
sort "sort"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `FieldMask` represents a set of symbolic field paths, for example:
@@ -284,12 +285,11 @@ import (
// request should verify the included field paths, and return an
// `INVALID_ARGUMENT` error if any path is unmappable.
type FieldMask struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New constructs a field mask from a list of paths and verifies that
@@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool {
func (x *FieldMask) Reset() {
*x = FieldMask{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldMask) String() string {
@@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {}
func (x *FieldMask) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -506,32 +504,21 @@ func (x *FieldMask) GetPaths() []string {
var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
-var file_google_protobuf_field_mask_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
- 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
- 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
- 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
- 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_field_mask_proto_rawDesc = "" +
+ "\n" +
+ " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
+ "\tFieldMask\x12\x14\n" +
+ "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
+ "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
- file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+ file_google_protobuf_field_mask_proto_rawDescData []byte
)
func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
- file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+ file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)))
})
return file_google_protobuf_field_mask_proto_rawDescData
}
@@ -553,25 +540,11 @@ func file_google_protobuf_field_mask_proto_init() {
if File_google_protobuf_field_mask_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FieldMask); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -582,7 +555,6 @@ func file_google_protobuf_field_mask_proto_init() {
MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
}.Build()
File_google_protobuf_field_mask_proto = out.File
- file_google_protobuf_field_mask_proto_rawDesc = nil
file_google_protobuf_field_mask_proto_goTypes = nil
file_google_protobuf_field_mask_proto_depIdxs = nil
}
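A hedged usage sketch of the fieldmaskpb helpers updated above. Timestamp is used only because its field names are well known; any message type would do, and the invalid path is deliberately made up.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build a mask whose paths are checked against the Timestamp schema.
	mask, err := fieldmaskpb.New(&timestamppb.Timestamp{}, "seconds", "nanos")
	if err != nil {
		panic(err)
	}
	fmt.Println(mask.GetPaths()) // [seconds nanos]

	// Append rejects paths that do not exist on the message.
	if err := mask.Append(&timestamppb.Timestamp{}, "no_such_field"); err != nil {
		fmt.Println("rejected:", err)
	}
}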
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d45361cbc..30411b728 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -120,6 +120,7 @@ package structpb
import (
base64 "encoding/base64"
+ json "encoding/json"
protojson "google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -127,6 +128,7 @@ import (
reflect "reflect"
sync "sync"
utf8 "unicode/utf8"
+ unsafe "unsafe"
)
// `NullValue` is a singleton enumeration to represent the null value for the
@@ -186,12 +188,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) {
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewStruct constructs a Struct from a general-purpose Go map.
@@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error {
func (x *Struct) Reset() {
*x = Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Struct) String() string {
@@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {}
func (x *Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -277,13 +276,10 @@ func (x *Struct) GetFields() map[string]*Value {
//
// The JSON representation for `Value` is JSON value.
type Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The kind of value.
//
- // Types that are assignable to Kind:
+ // Types that are valid to be assigned to Kind:
//
// *Value_NullValue
// *Value_NumberValue
@@ -291,24 +287,27 @@ type Value struct {
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewValue constructs a Value from a general-purpose Go interface.
//
-// ╔════════════════════════╤════════════════════════════════════════════╗
-// ║ Go type │ Conversion ║
-// ╠════════════════════════╪════════════════════════════════════════════╣
-// ║ nil │ stored as NullValue ║
-// ║ bool │ stored as BoolValue ║
-// ║ int, int32, int64 │ stored as NumberValue ║
-// ║ uint, uint32, uint64 │ stored as NumberValue ║
-// ║ float32, float64 │ stored as NumberValue ║
-// ║ string │ stored as StringValue; must be valid UTF-8 ║
-// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]any │ stored as StructValue ║
-// ║ []any │ stored as ListValue ║
-// ╚════════════════════════╧════════════════════════════════════════════╝
+// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+// ║ Go type │ Conversion ║
+// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+// ║ nil │ stored as NullValue ║
+// ║ bool │ stored as BoolValue ║
+// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║
+// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║
+// ║ float32, float64 │ stored as NumberValue ║
+// ║ json.Number │ stored as NumberValue ║
+// ║ string │ stored as StringValue; must be valid UTF-8 ║
+// ║ []byte │ stored as StringValue; base64-encoded ║
+// ║ map[string]any │ stored as StructValue ║
+// ║ []any │ stored as ListValue ║
+// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
@@ -320,12 +319,20 @@ func NewValue(v any) (*Value, error) {
return NewBoolValue(v), nil
case int:
return NewNumberValue(float64(v)), nil
+ case int8:
+ return NewNumberValue(float64(v)), nil
+ case int16:
+ return NewNumberValue(float64(v)), nil
case int32:
return NewNumberValue(float64(v)), nil
case int64:
return NewNumberValue(float64(v)), nil
case uint:
return NewNumberValue(float64(v)), nil
+ case uint8:
+ return NewNumberValue(float64(v)), nil
+ case uint16:
+ return NewNumberValue(float64(v)), nil
case uint32:
return NewNumberValue(float64(v)), nil
case uint64:
@@ -334,6 +341,12 @@ func NewValue(v any) (*Value, error) {
return NewNumberValue(float64(v)), nil
case float64:
return NewNumberValue(float64(v)), nil
+ case json.Number:
+ n, err := v.Float64()
+ if err != nil {
+ return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+ }
+ return NewNumberValue(n), nil
case string:
if !utf8.ValidString(v) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
@@ -441,11 +454,9 @@ func (x *Value) UnmarshalJSON(b []byte) error {
func (x *Value) Reset() {
*x = Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -456,7 +467,7 @@ func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -471,51 +482,63 @@ func (*Value) Descriptor() ([]byte, []int) {
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
}
-func (m *Value) GetKind() isValue_Kind {
- if m != nil {
- return m.Kind
+func (x *Value) GetKind() isValue_Kind {
+ if x != nil {
+ return x.Kind
}
return nil
}
func (x *Value) GetNullValue() NullValue {
- if x, ok := x.GetKind().(*Value_NullValue); ok {
- return x.NullValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_NullValue); ok {
+ return x.NullValue
+ }
}
return NullValue_NULL_VALUE
}
func (x *Value) GetNumberValue() float64 {
- if x, ok := x.GetKind().(*Value_NumberValue); ok {
- return x.NumberValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
}
return 0
}
func (x *Value) GetStringValue() string {
- if x, ok := x.GetKind().(*Value_StringValue); ok {
- return x.StringValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_StringValue); ok {
+ return x.StringValue
+ }
}
return ""
}
func (x *Value) GetBoolValue() bool {
- if x, ok := x.GetKind().(*Value_BoolValue); ok {
- return x.BoolValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
}
return false
}
func (x *Value) GetStructValue() *Struct {
- if x, ok := x.GetKind().(*Value_StructValue); ok {
- return x.StructValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_StructValue); ok {
+ return x.StructValue
+ }
}
return nil
}
func (x *Value) GetListValue() *ListValue {
- if x, ok := x.GetKind().(*Value_ListValue); ok {
- return x.ListValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_ListValue); ok {
+ return x.ListValue
+ }
}
return nil
}
@@ -570,12 +593,11 @@ func (*Value_ListValue) isValue_Kind() {}
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewList constructs a ListValue from a general-purpose Go slice.
@@ -613,11 +635,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error {
func (x *ListValue) Reset() {
*x = ListValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListValue) String() string {
@@ -628,7 +648,7 @@ func (*ListValue) ProtoMessage() {}
func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -652,64 +672,40 @@ func (x *ListValue) GetValues() []*Value {
var File_google_protobuf_struct_proto protoreflect.FileDescriptor
-var file_google_protobuf_struct_proto_rawDesc = []byte{
- 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
- 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
- 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
- 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
- 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
- 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
- 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
- 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
- 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
- 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
- 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
- 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
- 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
- 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
- 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
- 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
- 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-}
+const file_google_protobuf_struct_proto_rawDesc = "" +
+ "\n" +
+ "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
+ "\x06Struct\x12;\n" +
+ "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
+ "\vFieldsEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
+ "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
+ "\x05Value\x12;\n" +
+ "\n" +
+ "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
+ "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
+ "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
+ "\n" +
+ "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
+ "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
+ "\n" +
+ "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
+ "\x04kind\";\n" +
+ "\tListValue\x12.\n" +
+ "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
+ "\tNullValue\x12\x0e\n" +
+ "\n" +
+ "NULL_VALUE\x10\x00B\x7f\n" +
+ "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_struct_proto_rawDescOnce sync.Once
- file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+ file_google_protobuf_struct_proto_rawDescData []byte
)
func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
- file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+ file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)))
})
return file_google_protobuf_struct_proto_rawDescData
}
@@ -742,44 +738,6 @@ func file_google_protobuf_struct_proto_init() {
if File_google_protobuf_struct_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ListValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
@@ -792,7 +750,7 @@ func file_google_protobuf_struct_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)),
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
@@ -804,7 +762,6 @@ func file_google_protobuf_struct_proto_init() {
MessageInfos: file_google_protobuf_struct_proto_msgTypes,
}.Build()
File_google_protobuf_struct_proto = out.File
- file_google_protobuf_struct_proto_rawDesc = nil
file_google_protobuf_struct_proto_goTypes = nil
file_google_protobuf_struct_proto_depIdxs = nil
}
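The structpb hunks above also extend NewValue to accept int8/int16/uint8/uint16 and json.Number. A short usage sketch under that assumption; the map keys and values are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// NewStruct converts a general-purpose Go map; nested maps, slices,
	// numbers, strings, bools, and nil are handled per the NewValue table.
	s, err := structpb.NewStruct(map[string]any{
		"name":  "node-problem-detector",
		"ready": true,
		"count": 3,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.GetFields()["count"].GetNumberValue()) // 3

	// json.Number is accepted by NewValue in this protobuf release and is
	// stored as a NumberValue (converted through float64).
	v, err := structpb.NewValue(json.Number("1.25"))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetNumberValue()) // 1.25
}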
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645b..06d584c14 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -78,6 +78,7 @@ import (
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Timestamp represents a point in time independent of any time zone or local
@@ -170,10 +171,7 @@ import (
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
@@ -182,7 +180,9 @@ type Timestamp struct {
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Now constructs a new Timestamp from the current time.
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
func (x *Timestamp) Reset() {
*x = Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
func (x *Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -300,33 +298,22 @@ func (x *Timestamp) GetNanos() int32 {
var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
-var file_google_protobuf_timestamp_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
- 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
- 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
- 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
- 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
- 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
- 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+ "\n" +
+ "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+ "\tTimestamp\x12\x18\n" +
+ "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+ "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+ "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
- file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+ file_google_protobuf_timestamp_proto_rawDescData []byte
)
func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
- file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)))
})
return file_google_protobuf_timestamp_proto_rawDescData
}
@@ -348,25 +335,11 @@ func file_google_protobuf_timestamp_proto_init() {
if File_google_protobuf_timestamp_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -377,7 +350,6 @@ func file_google_protobuf_timestamp_proto_init() {
MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
}.Build()
File_google_protobuf_timestamp_proto = out.File
- file_google_protobuf_timestamp_proto_rawDesc = nil
file_google_protobuf_timestamp_proto_goTypes = nil
file_google_protobuf_timestamp_proto_depIdxs = nil
}
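A brief usage sketch of the timestamppb helpers whose generated code changes above; the times shown are illustrative.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.Now()
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().UTC().Format(time.RFC3339Nano))

	// Construct from an arbitrary time.Time as well.
	epoch := timestamppb.New(time.Unix(0, 0))
	fmt.Println(epoch.GetSeconds(), epoch.GetNanos()) // 0 0
}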
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index e473f826a..b7c2d0607 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -28,10 +28,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
+// Wrappers for primitive (non-message) types. These types were needed
+// for legacy reasons and are not recommended for use in new APIs.
+//
+// Historically these wrappers were useful to have presence on proto3 primitive
+// fields, but proto3 syntax has been updated to support the `optional` keyword.
+// Using that keyword is now the strongly preferred way to add presence to
+// proto3 primitive fields.
+//
+// A secondary usecase was to embed primitives in the `google.protobuf.Any`
+// type: it is now recommended that you embed your value in your own wrapper
+// message which can be specifically documented.
//
// These wrappers have no meaningful use within repeated fields as they lack
// the ability to detect presence on individual elements.
@@ -48,18 +55,21 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type DoubleValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The double value.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Double stores v in a new DoubleValue and returns a pointer to it.
@@ -69,11 +79,9 @@ func Double(v float64) *DoubleValue {
func (x *DoubleValue) Reset() {
*x = DoubleValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DoubleValue) String() string {
@@ -84,7 +92,7 @@ func (*DoubleValue) ProtoMessage() {}
func (x *DoubleValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -109,13 +117,15 @@ func (x *DoubleValue) GetValue() float64 {
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type FloatValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The float value.
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Float stores v in a new FloatValue and returns a pointer to it.
@@ -125,11 +135,9 @@ func Float(v float32) *FloatValue {
func (x *FloatValue) Reset() {
*x = FloatValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FloatValue) String() string {
@@ -140,7 +148,7 @@ func (*FloatValue) ProtoMessage() {}
func (x *FloatValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -165,13 +173,15 @@ func (x *FloatValue) GetValue() float32 {
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type Int64Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The int64 value.
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Int64 stores v in a new Int64Value and returns a pointer to it.
@@ -181,11 +191,9 @@ func Int64(v int64) *Int64Value {
func (x *Int64Value) Reset() {
*x = Int64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int64Value) String() string {
@@ -196,7 +204,7 @@ func (*Int64Value) ProtoMessage() {}
func (x *Int64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -221,13 +229,15 @@ func (x *Int64Value) GetValue() int64 {
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type UInt64Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The uint64 value.
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// UInt64 stores v in a new UInt64Value and returns a pointer to it.
@@ -237,11 +247,9 @@ func UInt64(v uint64) *UInt64Value {
func (x *UInt64Value) Reset() {
*x = UInt64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt64Value) String() string {
@@ -252,7 +260,7 @@ func (*UInt64Value) ProtoMessage() {}
func (x *UInt64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -277,13 +285,15 @@ func (x *UInt64Value) GetValue() uint64 {
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type Int32Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The int32 value.
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Int32 stores v in a new Int32Value and returns a pointer to it.
@@ -293,11 +303,9 @@ func Int32(v int32) *Int32Value {
func (x *Int32Value) Reset() {
*x = Int32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int32Value) String() string {
@@ -308,7 +316,7 @@ func (*Int32Value) ProtoMessage() {}
func (x *Int32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -333,13 +341,15 @@ func (x *Int32Value) GetValue() int32 {
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type UInt32Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The uint32 value.
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// UInt32 stores v in a new UInt32Value and returns a pointer to it.
@@ -349,11 +359,9 @@ func UInt32(v uint32) *UInt32Value {
func (x *UInt32Value) Reset() {
*x = UInt32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt32Value) String() string {
@@ -364,7 +372,7 @@ func (*UInt32Value) ProtoMessage() {}
func (x *UInt32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -389,13 +397,15 @@ func (x *UInt32Value) GetValue() uint32 {
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type BoolValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The bool value.
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Bool stores v in a new BoolValue and returns a pointer to it.
@@ -405,11 +415,9 @@ func Bool(v bool) *BoolValue {
func (x *BoolValue) Reset() {
*x = BoolValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BoolValue) String() string {
@@ -420,7 +428,7 @@ func (*BoolValue) ProtoMessage() {}
func (x *BoolValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -445,13 +453,15 @@ func (x *BoolValue) GetValue() bool {
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type StringValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The string value.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// String stores v in a new StringValue and returns a pointer to it.
@@ -461,11 +471,9 @@ func String(v string) *StringValue {
func (x *StringValue) Reset() {
*x = StringValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringValue) String() string {
@@ -476,7 +484,7 @@ func (*StringValue) ProtoMessage() {}
func (x *StringValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -501,13 +509,15 @@ func (x *StringValue) GetValue() string {
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type BytesValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The bytes value.
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Bytes stores v in a new BytesValue and returns a pointer to it.
@@ -517,11 +527,9 @@ func Bytes(v []byte) *BytesValue {
func (x *BytesValue) Reset() {
*x = BytesValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BytesValue) String() string {
@@ -532,7 +540,7 @@ func (*BytesValue) ProtoMessage() {}
func (x *BytesValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -556,50 +564,41 @@ func (x *BytesValue) GetValue() []byte {
var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
-var file_google_protobuf_wrappers_proto_rawDesc = []byte{
- 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
- 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
- 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
- 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
- 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
- 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
- 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
- 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+ "\n" +
+ "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+ "\vDoubleValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+ "\n" +
+ "FloatValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+ "\n" +
+ "Int64Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+ "\vUInt64Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+ "\n" +
+ "Int32Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+ "\vUInt32Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+ "\tBoolValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+ "\vStringValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+ "\n" +
+ "BytesValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+ "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
- file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+ file_google_protobuf_wrappers_proto_rawDescData []byte
)
func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
- file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+ file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
})
return file_google_protobuf_wrappers_proto_rawDescData
}
@@ -629,121 +628,11 @@ func file_google_protobuf_wrappers_proto_init() {
if File_google_protobuf_wrappers_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*DoubleValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FloatValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Int64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*UInt64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Int32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*UInt32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*BoolValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*StringValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*BytesValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -754,7 +643,6 @@ func file_google_protobuf_wrappers_proto_init() {
MessageInfos: file_google_protobuf_wrappers_proto_msgTypes,
}.Build()
File_google_protobuf_wrappers_proto = out.File
- file_google_protobuf_wrappers_proto_rawDesc = nil
file_google_protobuf_wrappers_proto_goTypes = nil
file_google_protobuf_wrappers_proto_depIdxs = nil
}
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore
new file mode 100644
index 000000000..b7ed7f956
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore
@@ -0,0 +1,6 @@
+# editor and IDE paraphernalia
+.idea
+.vscode
+
+# macOS paraphernalia
+.DS_Store
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE
new file mode 100644
index 000000000..df76d7d77
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
new file mode 100644
index 000000000..28e351693
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/README.md
@@ -0,0 +1,317 @@
+# JSON-Patch
+`jsonpatch` is a library which provides functionality for both applying
+[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
+well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
+
+[](http://godoc.org/github.com/evanphx/json-patch)
+[](https://travis-ci.org/evanphx/json-patch)
+[](https://goreportcard.com/report/github.com/evanphx/json-patch)
+
+# Get It!
+
+**Latest and greatest**:
+```bash
+go get -u github.com/evanphx/json-patch/v5
+```
+
+**Stable Versions**:
+* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
+* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+
+(previous versions below `v3` are unavailable)
+
+# Use It!
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
+* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
+* [Comparing JSON documents](#comparing-json-documents)
+* [Combine merge patches](#combine-merge-patches)
+
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+ This defaults to `true` and enables the non-standard practice of allowing
+ negative indices to mean indices starting at the end of an array. This
+ functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+ false`.
+
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+ which limits the total size increase in bytes caused by "copy" operations in a
+ patch. It defaults to 0, which means there is no limit.
+
+These global variables control the behavior of `jsonpatch.Apply`.
+
+An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
+is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
+
+The `jsonpatch.ApplyOptions` structure includes the configuration options above
+and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
+
+When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
+`remove` operations whose `path` points to a non-existent location in the JSON document.
+`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions`
+returning an error when hitting a missing `path` on `remove`.
+
+When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
+that `add` operations produce all the `path` elements that are missing from the target object.
+
+Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
+whose values are populated from the global configuration variables.
+
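+As a rough illustration of the options above, here is a minimal sketch. It
+assumes the v5 module, where `jsonpatch.NewApplyOptions` and the
+`ApplyWithOptions` method on `Patch` are available, and shows a `remove`
+on a missing path being tolerated:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch/v5"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24}`)
+	// This patch removes a path that does not exist in the document.
+	patchJSON := []byte(`[{"op": "remove", "path": "/height"}]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	// Start from the global defaults, then allow "remove" on missing paths.
+	options := jsonpatch.NewApplyOptions()
+	options.AllowMissingPathOnRemove = true
+
+	modified, err := patch.ApplyWithOptions(original, options)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+With `AllowMissingPathOnRemove` left at its default of `false`, the same patch
+would return an error instead.
+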
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
+
+It can describe the changes needed to convert from the original to the
+modified JSON document.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ // Let's create a merge patch from these two documents...
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ target := []byte(`{"name": "Jane", "age": 24}`)
+
+ patch, err := jsonpatch.CreateMergePatch(original, target)
+ if err != nil {
+ panic(err)
+ }
+
+	// Now let's apply the patch against a different JSON document...
+
+ alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+ modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+ fmt.Printf("patch document: %s\n", patch)
+ fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ patchJSON := []byte(`[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+ ]`)
+
+ patch, err := jsonpatch.DecodePatch(patchJSON)
+ if err != nil {
+ panic(err)
+ }
+
+ modified, err := patch.Apply(original)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Original document: %s\n", original)
+ fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences, and key-value ordering.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ similar := []byte(`
+ {
+ "age": 24,
+ "height": 3.21,
+ "name": "John"
+ }
+ `)
+ different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+ if jsonpatch.Equal(original, similar) {
+ fmt.Println(`"original" is structurally equal to "similar"`)
+ }
+
+ if !jsonpatch.Equal(original, different) {
+ fmt.Println(`"original" is _not_ structurally equal to "different"`)
+ }
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch that describes both sets of changes.
+
+Applying the resulting merge patch to a document yields a result that is
+structurally equivalent to applying each merge patch to the document in
+succession.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+ nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+ ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+ // Let's combine these merge patch documents...
+ combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+	// Apply each patch individually against the original document
+ withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+ if err != nil {
+ panic(err)
+ }
+
+ withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+ // Apply the combined patch against the original document
+
+ withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+ if err != nil {
+ panic(err)
+ }
+
+ // Do both result in the same thing? They should!
+ if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+ fmt.Println("Both JSON documents are structurally the same!")
+ }
+
+ fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the command-line program `json-patch`.
+
+This program takes multiple JSON patch documents as arguments and reads a
+JSON document from `stdin`. It applies the patch(es) against the document
+and outputs the modified document.
+
+**patch.1.json**
+```json
+[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+ {"op": "add", "path": "/address", "value": "123 Main St"},
+ {"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+ "name": "John",
+ "age": 24,
+ "height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically
+using [TravisCI](https://travis-ci.org/evanphx/json-patch).
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
new file mode 100644
index 000000000..75304b443
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+ limit int64
+ accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+ return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+ return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+ limit int
+ size int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+ return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+ return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go
new file mode 100644
index 000000000..ad88d4018
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go
@@ -0,0 +1,389 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+ curDoc, err := cur.intoDoc()
+
+ if err != nil {
+ pruneNulls(patch)
+ return patch
+ }
+
+ patchDoc, err := patch.intoDoc()
+
+ if err != nil {
+ return patch
+ }
+
+ mergeDocs(curDoc, patchDoc, mergeMerge)
+
+ return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+ for k, v := range *patch {
+ if v == nil {
+ if mergeMerge {
+ (*doc)[k] = nil
+ } else {
+ delete(*doc, k)
+ }
+ } else {
+ cur, ok := (*doc)[k]
+
+ if !ok || cur == nil {
+ if !mergeMerge {
+ pruneNulls(v)
+ }
+
+ (*doc)[k] = v
+ } else {
+ (*doc)[k] = merge(cur, v, mergeMerge)
+ }
+ }
+ }
+}
+
+func pruneNulls(n *lazyNode) {
+ sub, err := n.intoDoc()
+
+ if err == nil {
+ pruneDocNulls(sub)
+ } else {
+ ary, err := n.intoAry()
+
+ if err == nil {
+ pruneAryNulls(ary)
+ }
+ }
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+ for k, v := range *doc {
+ if v == nil {
+ delete(*doc, k)
+ } else {
+ pruneNulls(v)
+ }
+ }
+
+ return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+ newAry := []*lazyNode{}
+
+ for _, v := range *ary {
+ if v != nil {
+ pruneNulls(v)
+ }
+ newAry = append(newAry, v)
+ }
+
+ *ary = newAry
+
+ return ary
+}
+
+var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+ return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+ return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+ doc := &partialDoc{}
+
+ docErr := json.Unmarshal(docData, doc)
+
+ patch := &partialDoc{}
+
+ patchErr := json.Unmarshal(patchData, patch)
+
+ if _, ok := docErr.(*json.SyntaxError); ok {
+ return nil, ErrBadJSONDoc
+ }
+
+ if _, ok := patchErr.(*json.SyntaxError); ok {
+ return nil, ErrBadJSONPatch
+ }
+
+ if docErr == nil && *doc == nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ if patchErr == nil && *patch == nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ if docErr != nil || patchErr != nil {
+ // Not an error, just not a doc, so we turn straight into the patch
+ if patchErr == nil {
+ if mergeMerge {
+ doc = patch
+ } else {
+ doc = pruneDocNulls(patch)
+ }
+ } else {
+ patchAry := &partialArray{}
+ patchErr = json.Unmarshal(patchData, patchAry)
+
+ if patchErr != nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ pruneAryNulls(patchAry)
+
+ out, patchErr := json.Marshal(patchAry)
+
+ if patchErr != nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ return out, nil
+ }
+ } else {
+ mergeDocs(doc, patch, mergeMerge)
+ }
+
+ return json.Marshal(doc)
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+ input = bytes.TrimSpace(input)
+
+ hasPrefix := bytes.HasPrefix(input, []byte("["))
+ hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+ return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalResemblesArray := resemblesJSONArray(originalJSON)
+ modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+ // Do both byte-slices seem like JSON arrays?
+ if originalResemblesArray && modifiedResemblesArray {
+ return createArrayMergePatch(originalJSON, modifiedJSON)
+ }
+
+	// Are both byte-slices not arrays? Then they are likely JSON objects...
+ if !originalResemblesArray && !modifiedResemblesArray {
+ return createObjectMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // None of the above? Then return an error because of mismatched types.
+ return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDoc := map[string]interface{}{}
+ modifiedDoc := map[string]interface{}{}
+
+ err := json.Unmarshal(originalJSON, &originalDoc)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ dest, err := getDiff(originalDoc, modifiedDoc)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDocs := []json.RawMessage{}
+ modifiedDocs := []json.RawMessage{}
+
+ err := json.Unmarshal(originalJSON, &originalDocs)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ total := len(originalDocs)
+ if len(modifiedDocs) != total {
+ return nil, ErrBadJSONDoc
+ }
+
+ result := []json.RawMessage{}
+ for i := 0; i < len(originalDocs); i++ {
+ original := originalDocs[i]
+ modified := modifiedDocs[i]
+
+ patch, err := createObjectMergePatch(original, modified)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, json.RawMessage(patch))
+ }
+
+ return json.Marshal(result)
+}
+
+// Returns true if the arrays match (elements must be JSON types).
+// As is idiomatic for Go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ if (a == nil && b != nil) || (a != nil && b == nil) {
+ return false
+ }
+ for i := range a {
+ if !matchesValue(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Returns true if the values match (must be JSON types).
+// The types of the values must match; otherwise it will always return false.
+// If two map[string]interface{} values are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ return false
+ }
+ switch at := av.(type) {
+ case string:
+ bt := bv.(string)
+ if bt == at {
+ return true
+ }
+ case float64:
+ bt := bv.(float64)
+ if bt == at {
+ return true
+ }
+ case bool:
+ bt := bv.(bool)
+ if bt == at {
+ return true
+ }
+ case nil:
+ // Both nil, fine.
+ return true
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ if len(bt) != len(at) {
+ return false
+ }
+ for key := range bt {
+ av, aOK := at[key]
+ bv, bOK := bt[key]
+ if aOK != bOK {
+ return false
+ }
+ if !matchesValue(av, bv) {
+ return false
+ }
+ }
+ return true
+ case []interface{}:
+ bt := bv.([]interface{})
+ return matchesArray(at, bt)
+ }
+ return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+ into := map[string]interface{}{}
+ for key, bv := range b {
+ av, ok := a[key]
+ // value was added
+ if !ok {
+ into[key] = bv
+ continue
+ }
+ // If types have changed, replace completely
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ into[key] = bv
+ continue
+ }
+ // Types are the same, compare values
+ switch at := av.(type) {
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ dst := make(map[string]interface{}, len(bt))
+ dst, err := getDiff(at, bt)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) > 0 {
+ into[key] = dst
+ }
+ case string, float64, bool:
+ if !matchesValue(av, bv) {
+ into[key] = bv
+ }
+ case []interface{}:
+ bt := bv.([]interface{})
+ if !matchesArray(at, bt) {
+ into[key] = bv
+ }
+ case nil:
+ switch bv.(type) {
+ case nil:
+ // Both nil, fine.
+ default:
+ into[key] = bv
+ }
+ default:
+ panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+ }
+ }
+ // Now add all deleted values as nil
+ for key := range a {
+ _, found := b[key]
+ if !found {
+ into[key] = nil
+ }
+ }
+ return into, nil
+}
diff --git a/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
new file mode 100644
index 000000000..dc2b7e51e
--- /dev/null
+++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
@@ -0,0 +1,851 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ eRaw = iota
+ eDoc
+ eAry
+)
+
+var (
+ // SupportNegativeIndices decides whether to support non-standard practice of
+ // allowing negative indices to mean indices starting at the end of an array.
+ // Default to true.
+ SupportNegativeIndices bool = true
+ // AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+ // "copy" operations in a patch.
+ AccumulatedCopySizeLimit int64 = 0
+)
+
+var (
+ ErrTestFailed = errors.New("test failed")
+ ErrMissing = errors.New("missing value")
+ ErrUnknownType = errors.New("unknown object type")
+ ErrInvalid = errors.New("invalid state detected")
+ ErrInvalidIndex = errors.New("invalid index referenced")
+)
+
+type lazyNode struct {
+ raw *json.RawMessage
+ doc partialDoc
+ ary partialArray
+ which int
+}
+
+// Operation is a single JSON-Patch step, such as a single 'add' operation.
+type Operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of Operations.
+type Patch []Operation
+
+type partialDoc map[string]*lazyNode
+type partialArray []*lazyNode
+
+type container interface {
+ get(key string) (*lazyNode, error)
+ set(key string, val *lazyNode) error
+ add(key string, val *lazyNode) error
+ remove(key string) error
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+ return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+ switch n.which {
+ case eRaw:
+ return json.Marshal(n.raw)
+ case eDoc:
+ return json.Marshal(n.doc)
+ case eAry:
+ return json.Marshal(n.ary)
+ default:
+ return nil, ErrUnknownType
+ }
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+ dest := make(json.RawMessage, len(data))
+ copy(dest, data)
+ n.raw = &dest
+ n.which = eRaw
+ return nil
+}
+
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+ if src == nil {
+ return nil, 0, nil
+ }
+ a, err := src.MarshalJSON()
+ if err != nil {
+ return nil, 0, err
+ }
+ sz := len(a)
+ ra := make(json.RawMessage, sz)
+ copy(ra, a)
+ return newLazyNode(&ra), sz, nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+ if n.which == eDoc {
+ return &n.doc, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eDoc
+ return &n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+ if n.which == eAry {
+ return &n.ary, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eAry
+ return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+ buf := &bytes.Buffer{}
+
+ if n.raw == nil {
+ return nil
+ }
+
+ err := json.Compact(buf, *n.raw)
+
+ if err != nil {
+ return *n.raw
+ }
+
+ return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eDoc
+ return true
+}
+
+func (n *lazyNode) tryAry() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eAry
+ return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+ if n.which == eRaw {
+ if !n.tryDoc() && !n.tryAry() {
+ if o.which != eRaw {
+ return false
+ }
+
+ return bytes.Equal(n.compact(), o.compact())
+ }
+ }
+
+ if n.which == eDoc {
+ if o.which == eRaw {
+ if !o.tryDoc() {
+ return false
+ }
+ }
+
+ if o.which != eDoc {
+ return false
+ }
+
+ if len(n.doc) != len(o.doc) {
+ return false
+ }
+
+ for k, v := range n.doc {
+ ov, ok := o.doc[k]
+
+ if !ok {
+ return false
+ }
+
+ if (v == nil) != (ov == nil) {
+ return false
+ }
+
+ if v == nil && ov == nil {
+ continue
+ }
+
+ if !v.equal(ov) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ if o.which != eAry && !o.tryAry() {
+ return false
+ }
+
+ if len(n.ary) != len(o.ary) {
+ return false
+ }
+
+ for idx, val := range n.ary {
+ if !val.equal(o.ary[idx]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Kind reads the "op" field of the Operation.
+func (o Operation) Kind() string {
+ if obj, ok := o["op"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown"
+ }
+
+ return op
+ }
+
+ return "unknown"
+}
+
+// Path reads the "path" field of the Operation.
+func (o Operation) Path() (string, error) {
+ if obj, ok := o["path"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+}
+
+// From reads the "from" field of the Operation.
+func (o Operation) From() (string, error) {
+ if obj, ok := o["from"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+}
+
+func (o Operation) value() *lazyNode {
+ if obj, ok := o["value"]; ok {
+ return newLazyNode(obj)
+ }
+
+ return nil
+}
+
+// ValueInterface decodes the operation value into an interface.
+func (o Operation) ValueInterface() (interface{}, error) {
+ if obj, ok := o["value"]; ok && obj != nil {
+ var v interface{}
+
+ err := json.Unmarshal(*obj, &v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+}
+
+func isArray(buf []byte) bool {
+Loop:
+ for _, c := range buf {
+ switch c {
+ case ' ':
+ case '\n':
+ case '\t':
+ continue
+ case '[':
+ return true
+ default:
+ break Loop
+ }
+ }
+
+ return false
+}
+
+func findObject(pd *container, path string) (container, string) {
+ doc := *pd
+
+ split := strings.Split(path, "/")
+
+ if len(split) < 2 {
+ return nil, ""
+ }
+
+ parts := split[1 : len(split)-1]
+
+ key := split[len(split)-1]
+
+ var err error
+
+ for _, part := range parts {
+
+ next, ok := doc.get(decodePatchKey(part))
+
+ if next == nil || ok != nil {
+ return nil, ""
+ }
+
+ if isArray(*next.raw) {
+ doc, err = next.intoAry()
+
+ if err != nil {
+ return nil, ""
+ }
+ } else {
+ doc, err = next.intoDoc()
+
+ if err != nil {
+ return nil, ""
+ }
+ }
+ }
+
+ return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) get(key string) (*lazyNode, error) {
+ return (*d)[key], nil
+}
+
+func (d *partialDoc) remove(key string) error {
+ _, ok := (*d)[key]
+ if !ok {
+ return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+ }
+
+ delete(*d, key)
+ return nil
+}
+
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
+func (d *partialArray) set(key string, val *lazyNode) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ (*d)[idx] = val
+ return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode) error {
+ if key == "-" {
+ *d = append(*d, val)
+ return nil
+ }
+
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ }
+
+ sz := len(*d) + 1
+
+ ary := make([]*lazyNode, sz)
+
+ cur := *d
+
+ if idx >= len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(ary)
+ }
+
+ copy(ary[0:idx], cur[0:idx])
+ ary[idx] = val
+ copy(ary[idx+1:], cur[idx:])
+
+ *d = ary
+ return nil
+}
+
+func (d *partialArray) get(key string) (*lazyNode, error) {
+ idx, err := strconv.Atoi(key)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ if idx >= len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ cur := *d
+
+ if idx >= len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(cur)
+ }
+
+ ary := make([]*lazyNode, len(cur)-1)
+
+ copy(ary[0:idx], cur[0:idx])
+ copy(ary[idx:], cur[idx+1:])
+
+ *d = ary
+ return nil
+
+}
+
+func (p Patch) add(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.add(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in add for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) remove(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) replace(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "replace operation failed to decode path")
+ }
+
+ if path == "" {
+ val := op.value()
+
+ if val.which == eRaw {
+ if !val.tryDoc() {
+ if !val.tryAry() {
+ return errors.Wrapf(err, "replace operation value must be object or array")
+ }
+ }
+ }
+
+ switch val.which {
+ case eAry:
+ *doc = &val.ary
+ case eDoc:
+ *doc = &val.doc
+ case eRaw:
+ return errors.Wrapf(err, "replace operation hit impossible case")
+ }
+
+ return nil
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ }
+
+ _, ok := con.get(key)
+ if ok != nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ }
+
+ err = con.set(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) move(doc *container, op Operation) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ err = con.add(key, val)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) test(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "test operation failed to decode path")
+ }
+
+ if path == "" {
+ var self lazyNode
+
+ switch sv := (*doc).(type) {
+ case *partialDoc:
+ self.doc = *sv
+ self.which = eDoc
+ case *partialArray:
+ self.ary = *sv
+ self.which = eAry
+ }
+
+ if self.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in test for path: '%s'", path)
+ }
+
+ if val == nil {
+ if op.value().raw == nil {
+ return nil
+ }
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ } else if op.value() == nil {
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ if val.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "copy operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ valCopy, sz, err := deepCopy(val)
+ if err != nil {
+ return errors.Wrapf(err, "error while performing deep copy")
+ }
+
+ (*accumulatedCopySize) += int64(sz)
+ if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
+ return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
+ }
+
+ err = con.add(key, valCopy)
+ if err != nil {
+ return errors.Wrapf(err, "error while adding value during copy")
+ }
+
+ return nil
+}
+
+// Equal indicates whether two JSON documents are structurally equal.
+func Equal(a, b []byte) bool {
+ ra := make(json.RawMessage, len(a))
+ copy(ra, a)
+ la := newLazyNode(&ra)
+
+ rb := make(json.RawMessage, len(b))
+ copy(rb, b)
+ lb := newLazyNode(&rb)
+
+ return la.equal(lb)
+}
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+ var p Patch
+
+ err := json.Unmarshal(buf, &p)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+ return p.ApplyIndent(doc, "")
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+ if len(doc) == 0 {
+ return doc, nil
+ }
+
+ var pd container
+ if doc[0] == '[' {
+ pd = &partialArray{}
+ } else {
+ pd = &partialDoc{}
+ }
+
+ err := json.Unmarshal(doc, pd)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = nil
+
+ var accumulatedCopySize int64
+
+ for _, op := range p {
+ switch op.Kind() {
+ case "add":
+ err = p.add(&pd, op)
+ case "remove":
+ err = p.remove(&pd, op)
+ case "replace":
+ err = p.replace(&pd, op)
+ case "move":
+ err = p.move(&pd, op)
+ case "test":
+ err = p.test(&pd, op)
+ case "copy":
+ err = p.copy(&pd, op, &accumulatedCopySize)
+ default:
+ err = fmt.Errorf("Unexpected kind: %s", op.Kind())
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if indent != "" {
+ return json.MarshalIndent(pd, "", indent)
+ }
+
+ return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence. This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
+
+var (
+ rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+ return rfc6901Decoder.Replace(k)
+}
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
index 385c60e0d..98066211d 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:prerelease-lifecycle-gen=true
// +groupName=admissionregistration.k8s.io
// Package v1alpha1 is the v1alpha1 version of the API.
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
index 111cc7287..993ff6f20 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
@@ -25,6 +25,7 @@ import (
io "io"
proto "github.com/gogo/protobuf/proto"
+ k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,10 +46,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
+func (*ApplyConfiguration) ProtoMessage() {}
+func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{0}
+}
+func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ApplyConfiguration.Merge(m, src)
+}
+func (m *ApplyConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ApplyConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
+
func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
func (*AuditAnnotation) ProtoMessage() {}
func (*AuditAnnotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{0}
+ return fileDescriptor_2c49182728ae0af5, []int{1}
}
func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -76,7 +105,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
func (*ExpressionWarning) ProtoMessage() {}
func (*ExpressionWarning) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{1}
+ return fileDescriptor_2c49182728ae0af5, []int{2}
}
func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -101,10 +130,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() {
var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
+func (m *JSONPatch) Reset() { *m = JSONPatch{} }
+func (*JSONPatch) ProtoMessage() {}
+func (*JSONPatch) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{3}
+}
+func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *JSONPatch) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_JSONPatch.Merge(m, src)
+}
+func (m *JSONPatch) XXX_Size() int {
+ return m.Size()
+}
+func (m *JSONPatch) XXX_DiscardUnknown() {
+ xxx_messageInfo_JSONPatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
+
func (m *MatchCondition) Reset() { *m = MatchCondition{} }
func (*MatchCondition) ProtoMessage() {}
func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{2}
+ return fileDescriptor_2c49182728ae0af5, []int{4}
}
func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -132,7 +189,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
func (m *MatchResources) Reset() { *m = MatchResources{} }
func (*MatchResources) ProtoMessage() {}
func (*MatchResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{3}
+ return fileDescriptor_2c49182728ae0af5, []int{5}
}
func (m *MatchResources) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -157,10 +214,206 @@ func (m *MatchResources) XXX_DiscardUnknown() {
var xxx_messageInfo_MatchResources proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{6}
+}
+func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{7}
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{8}
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{9}
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{10}
+}
+func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{11}
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
+
+func (m *Mutation) Reset() { *m = Mutation{} }
+func (*Mutation) ProtoMessage() {}
+func (*Mutation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2c49182728ae0af5, []int{12}
+}
+func (m *Mutation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Mutation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Mutation.Merge(m, src)
+}
+func (m *Mutation) XXX_Size() int {
+ return m.Size()
+}
+func (m *Mutation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Mutation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mutation proto.InternalMessageInfo
+
func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
func (*NamedRuleWithOperations) ProtoMessage() {}
func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{4}
+ return fileDescriptor_2c49182728ae0af5, []int{13}
}
func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -188,7 +441,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
func (m *ParamKind) Reset() { *m = ParamKind{} }
func (*ParamKind) ProtoMessage() {}
func (*ParamKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{5}
+ return fileDescriptor_2c49182728ae0af5, []int{14}
}
func (m *ParamKind) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -216,7 +469,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
func (m *ParamRef) Reset() { *m = ParamRef{} }
func (*ParamRef) ProtoMessage() {}
func (*ParamRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{6}
+ return fileDescriptor_2c49182728ae0af5, []int{15}
}
func (m *ParamRef) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -244,7 +497,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
func (m *TypeChecking) Reset() { *m = TypeChecking{} }
func (*TypeChecking) ProtoMessage() {}
func (*TypeChecking) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{7}
+ return fileDescriptor_2c49182728ae0af5, []int{16}
}
func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -272,7 +525,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
func (*ValidatingAdmissionPolicy) ProtoMessage() {}
func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{8}
+ return fileDescriptor_2c49182728ae0af5, []int{17}
}
func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -300,7 +553,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{9}
+ return fileDescriptor_2c49182728ae0af5, []int{18}
}
func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -328,7 +581,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{10}
+ return fileDescriptor_2c49182728ae0af5, []int{19}
}
func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -356,7 +609,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{11}
+ return fileDescriptor_2c49182728ae0af5, []int{20}
}
func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -384,7 +637,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{12}
+ return fileDescriptor_2c49182728ae0af5, []int{21}
}
func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -412,7 +665,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{13}
+ return fileDescriptor_2c49182728ae0af5, []int{22}
}
func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -440,7 +693,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{14}
+ return fileDescriptor_2c49182728ae0af5, []int{23}
}
func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -468,7 +721,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
func (m *Validation) Reset() { *m = Validation{} }
func (*Validation) ProtoMessage() {}
func (*Validation) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{15}
+ return fileDescriptor_2c49182728ae0af5, []int{24}
}
func (m *Validation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -496,7 +749,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
func (m *Variable) Reset() { *m = Variable{} }
func (*Variable) ProtoMessage() {}
func (*Variable) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{16}
+ return fileDescriptor_2c49182728ae0af5, []int{25}
}
func (m *Variable) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,10 +775,19 @@ func (m *Variable) XXX_DiscardUnknown() {
var xxx_messageInfo_Variable proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ApplyConfiguration")
proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation")
proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning")
+ proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1alpha1.JSONPatch")
proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition")
proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources")
+ proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy")
+ proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding")
+ proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingList")
+ proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec")
+ proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyList")
+ proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec")
+ proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Mutation")
proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations")
proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind")
proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef")
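The registrations above add the MutatingAdmissionPolicy family of types to this package. For orientation only, a client built against these generated Go types could construct a policy with an apply-configuration mutation roughly as sketched below; the object name, the CEL expression, and the PatchTypeApplyConfiguration constant are assumptions about the surrounding v1alpha1 API rather than anything defined in this generated file:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical MutatingAdmissionPolicy; the field names follow the Marshal
	// functions in this file (Spec, Mutations, PatchType, ApplyConfiguration,
	// Expression), while the constant and values are illustrative.
	policy := admissionregistrationv1alpha1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-mutating-policy"},
		Spec: admissionregistrationv1alpha1.MutatingAdmissionPolicySpec{
			Mutations: []admissionregistrationv1alpha1.Mutation{{
				PatchType: admissionregistrationv1alpha1.PatchTypeApplyConfiguration,
				ApplyConfiguration: &admissionregistrationv1alpha1.ApplyConfiguration{
					Expression: `Object{metadata: Object.metadata{labels: {"mutated": "true"}}}`,
				},
			}},
		},
	}
	fmt.Println(policy.Name)
}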
@@ -546,101 +808,147 @@ func init() {
}
var fileDescriptor_2c49182728ae0af5 = []byte{
- // 1498 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0xc5,
- 0x17, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0x73, 0xb1, 0xe7, 0xdf, 0x2a, 0x6e, 0xfe, 0xd4, 0x1b, 0xad,
- 0x2a, 0xd4, 0x48, 0xb0, 0x26, 0x69, 0xa1, 0xb4, 0x42, 0x42, 0xd9, 0xde, 0xe8, 0x25, 0x17, 0x4d,
- 0x51, 0x22, 0x21, 0x90, 0x98, 0xec, 0x4e, 0xec, 0x69, 0xbc, 0x17, 0x76, 0xd6, 0xa1, 0x11, 0x48,
- 0x54, 0xe2, 0x05, 0xde, 0x78, 0xe0, 0x85, 0x57, 0x3e, 0x02, 0xdf, 0x80, 0xb7, 0x3e, 0xf6, 0xb1,
- 0x3c, 0x60, 0x51, 0xf3, 0xc2, 0x27, 0x00, 0x29, 0x2f, 0xa0, 0x99, 0x9d, 0xbd, 0xda, 0x26, 0x76,
- 0x09, 0xbc, 0x79, 0xce, 0x9c, 0xf3, 0xfb, 0xcd, 0x39, 0x73, 0xce, 0xd9, 0x33, 0x06, 0xd7, 0x0e,
- 0xde, 0x66, 0x3a, 0x75, 0x1b, 0xd8, 0xa3, 0x0d, 0x6c, 0xd9, 0x94, 0x31, 0xea, 0x3a, 0x3e, 0x69,
- 0x52, 0x16, 0xf8, 0x38, 0xa0, 0xae, 0xd3, 0x38, 0x5c, 0xc5, 0x6d, 0xaf, 0x85, 0x57, 0x1b, 0x4d,
- 0xe2, 0x10, 0x1f, 0x07, 0xc4, 0xd2, 0x3d, 0xdf, 0x0d, 0x5c, 0xb8, 0x12, 0x9a, 0xea, 0xd8, 0xa3,
- 0xfa, 0x40, 0x53, 0x3d, 0x32, 0x5d, 0x7a, 0xbd, 0x49, 0x83, 0x56, 0x67, 0x4f, 0x37, 0x5d, 0xbb,
- 0xd1, 0x74, 0x9b, 0x6e, 0x43, 0x20, 0xec, 0x75, 0xf6, 0xc5, 0x4a, 0x2c, 0xc4, 0xaf, 0x10, 0x79,
- 0xe9, 0xf2, 0x08, 0x87, 0xca, 0x1f, 0x67, 0xe9, 0x4a, 0x62, 0x64, 0x63, 0xb3, 0x45, 0x1d, 0xe2,
- 0x1f, 0x35, 0xbc, 0x83, 0x26, 0x17, 0xb0, 0x86, 0x4d, 0x02, 0x3c, 0xc8, 0xaa, 0x31, 0xcc, 0xca,
- 0xef, 0x38, 0x01, 0xb5, 0x49, 0x9f, 0xc1, 0x5b, 0x27, 0x19, 0x30, 0xb3, 0x45, 0x6c, 0x9c, 0xb7,
- 0xd3, 0x18, 0x58, 0x58, 0xef, 0x58, 0x34, 0x58, 0x77, 0x1c, 0x37, 0x10, 0x4e, 0xc0, 0x0b, 0xa0,
- 0x70, 0x40, 0x8e, 0x6a, 0xca, 0xb2, 0x72, 0xa9, 0x64, 0x94, 0x9f, 0x76, 0xd5, 0x89, 0x5e, 0x57,
- 0x2d, 0xdc, 0x27, 0x47, 0x88, 0xcb, 0xe1, 0x3a, 0x58, 0x38, 0xc4, 0xed, 0x0e, 0xb9, 0xf5, 0xd8,
- 0xf3, 0x89, 0x08, 0x41, 0x6d, 0x52, 0xa8, 0x2e, 0x4a, 0xd5, 0x85, 0x9d, 0xec, 0x36, 0xca, 0xeb,
- 0x6b, 0x6d, 0x50, 0x4d, 0x56, 0xbb, 0xd8, 0x77, 0xa8, 0xd3, 0x84, 0xaf, 0x81, 0x99, 0x7d, 0x4a,
- 0xda, 0x16, 0x22, 0xfb, 0x12, 0xb0, 0x22, 0x01, 0x67, 0x6e, 0x4b, 0x39, 0x8a, 0x35, 0xe0, 0x0a,
- 0x98, 0xfe, 0x34, 0x34, 0xac, 0x15, 0x84, 0xf2, 0x82, 0x54, 0x9e, 0x96, 0x78, 0x28, 0xda, 0xd7,
- 0xf6, 0xc1, 0xfc, 0x06, 0x0e, 0xcc, 0xd6, 0x0d, 0xd7, 0xb1, 0xa8, 0xf0, 0x70, 0x19, 0x14, 0x1d,
- 0x6c, 0x13, 0xe9, 0xe2, 0xac, 0xb4, 0x2c, 0x6e, 0x62, 0x9b, 0x20, 0xb1, 0x03, 0xd7, 0x00, 0x20,
- 0x79, 0xff, 0xa0, 0xd4, 0x03, 0x29, 0xd7, 0x52, 0x5a, 0xda, 0x4f, 0x45, 0x49, 0x84, 0x08, 0x73,
- 0x3b, 0xbe, 0x49, 0x18, 0x7c, 0x0c, 0xaa, 0x1c, 0x8e, 0x79, 0xd8, 0x24, 0x0f, 0x49, 0x9b, 0x98,
- 0x81, 0xeb, 0x0b, 0xd6, 0xf2, 0xda, 0x65, 0x3d, 0xc9, 0xd3, 0xf8, 0xc6, 0x74, 0xef, 0xa0, 0xc9,
- 0x05, 0x4c, 0xe7, 0x89, 0xa1, 0x1f, 0xae, 0xea, 0x0f, 0xf0, 0x1e, 0x69, 0x47, 0xa6, 0xc6, 0xb9,
- 0x5e, 0x57, 0xad, 0x6e, 0xe6, 0x11, 0x51, 0x3f, 0x09, 0x74, 0xc1, 0xbc, 0xbb, 0xf7, 0x88, 0x98,
- 0x41, 0x4c, 0x3b, 0xf9, 0xf2, 0xb4, 0xb0, 0xd7, 0x55, 0xe7, 0xb7, 0x32, 0x70, 0x28, 0x07, 0x0f,
- 0xbf, 0x00, 0x73, 0xbe, 0xf4, 0x1b, 0x75, 0xda, 0x84, 0xd5, 0x0a, 0xcb, 0x85, 0x4b, 0xe5, 0x35,
- 0x43, 0x1f, 0xb9, 0x1c, 0x75, 0xee, 0x98, 0xc5, 0x8d, 0x77, 0x69, 0xd0, 0xda, 0xf2, 0x48, 0xb8,
- 0xcf, 0x8c, 0x73, 0x32, 0xf0, 0x73, 0x28, 0x4d, 0x80, 0xb2, 0x7c, 0xf0, 0x5b, 0x05, 0x9c, 0x25,
- 0x8f, 0xcd, 0x76, 0xc7, 0x22, 0x19, 0xbd, 0x5a, 0xf1, 0xd4, 0x0e, 0xf2, 0x8a, 0x3c, 0xc8, 0xd9,
- 0x5b, 0x03, 0x78, 0xd0, 0x40, 0x76, 0x78, 0x13, 0x94, 0x6d, 0x9e, 0x14, 0xdb, 0x6e, 0x9b, 0x9a,
- 0x47, 0xb5, 0x69, 0x91, 0x4a, 0x5a, 0xaf, 0xab, 0x96, 0x37, 0x12, 0xf1, 0x71, 0x57, 0x5d, 0x48,
- 0x2d, 0xdf, 0x3f, 0xf2, 0x08, 0x4a, 0x9b, 0x69, 0xcf, 0x15, 0xb0, 0x38, 0xe4, 0x54, 0xf0, 0x6a,
- 0x12, 0x79, 0x91, 0x1a, 0x35, 0x65, 0xb9, 0x70, 0xa9, 0x64, 0x54, 0xd3, 0x11, 0x13, 0x1b, 0x28,
- 0xab, 0x07, 0xbf, 0x54, 0x00, 0xf4, 0xfb, 0xf0, 0x64, 0xa2, 0x5c, 0x1d, 0x25, 0x5e, 0xfa, 0x80,
- 0x20, 0x2d, 0xc9, 0x20, 0xc1, 0xfe, 0x3d, 0x34, 0x80, 0x4e, 0xc3, 0xa0, 0xb4, 0x8d, 0x7d, 0x6c,
- 0xdf, 0xa7, 0x8e, 0xc5, 0xeb, 0x0e, 0x7b, 0x74, 0x87, 0xf8, 0xa2, 0xee, 0x94, 0x6c, 0xdd, 0xad,
- 0x6f, 0xdf, 0x95, 0x3b, 0x28, 0xa5, 0xc5, 0xab, 0xf9, 0x80, 0x3a, 0x96, 0xac, 0xd2, 0xb8, 0x9a,
- 0x39, 0x1e, 0x12, 0x3b, 0xda, 0x0f, 0x93, 0x60, 0x46, 0x70, 0xf0, 0xce, 0x71, 0x72, 0xf1, 0x37,
- 0x40, 0x29, 0x2e, 0x28, 0x89, 0x5a, 0x95, 0x6a, 0xa5, 0xb8, 0xf8, 0x50, 0xa2, 0x03, 0x3f, 0x02,
- 0x33, 0x2c, 0x2a, 0xb3, 0xc2, 0xcb, 0x97, 0xd9, 0x2c, 0xef, 0x75, 0x71, 0x81, 0xc5, 0x90, 0x30,
- 0x00, 0x8b, 0x1e, 0x3f, 0x3d, 0x09, 0x88, 0xbf, 0xe9, 0x06, 0xb7, 0xdd, 0x8e, 0x63, 0xad, 0x9b,
- 0x3c, 0x7a, 0xb5, 0xa2, 0x38, 0xdd, 0xf5, 0x5e, 0x57, 0x5d, 0xdc, 0x1e, 0xac, 0x72, 0xdc, 0x55,
- 0xff, 0x3f, 0x64, 0x4b, 0xa4, 0xd9, 0x30, 0x68, 0xed, 0x3b, 0x05, 0xcc, 0x72, 0x8d, 0x1b, 0x2d,
- 0x62, 0x1e, 0xf0, 0x06, 0xfd, 0x95, 0x02, 0x20, 0xc9, 0xb7, 0xed, 0x30, 0xdb, 0xca, 0x6b, 0xef,
- 0x8c, 0x51, 0x5e, 0x7d, 0xbd, 0x3f, 0xc9, 0x99, 0xbe, 0x2d, 0x86, 0x06, 0x70, 0x6a, 0x3f, 0x4f,
- 0x82, 0xf3, 0x3b, 0xb8, 0x4d, 0x2d, 0x1c, 0x50, 0xa7, 0xb9, 0x1e, 0xd1, 0x85, 0xc5, 0x02, 0x3f,
- 0x06, 0x33, 0x3c, 0xc0, 0x16, 0x0e, 0xb0, 0x6c, 0xb6, 0x6f, 0x8c, 0x76, 0x1d, 0x61, 0x8b, 0xdb,
- 0x20, 0x01, 0x4e, 0x92, 0x2e, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x02, 0x45, 0xe6, 0x11, 0x53, 0x96,
- 0xca, 0x7b, 0x63, 0xf8, 0x3e, 0xf4, 0xd4, 0x0f, 0x3d, 0x62, 0x26, 0xd9, 0xc8, 0x57, 0x48, 0x70,
- 0x40, 0x1f, 0x4c, 0xb1, 0x00, 0x07, 0x1d, 0x26, 0x53, 0xeb, 0xde, 0xa9, 0xb0, 0x09, 0x44, 0x63,
- 0x5e, 0xf2, 0x4d, 0x85, 0x6b, 0x24, 0x99, 0xb4, 0x3f, 0x14, 0xb0, 0x3c, 0xd4, 0xd6, 0xa0, 0x8e,
- 0xc5, 0xf3, 0xe1, 0xdf, 0x0f, 0xf3, 0x27, 0x99, 0x30, 0x6f, 0x9d, 0x86, 0xe3, 0xf2, 0xf0, 0xc3,
- 0xa2, 0xad, 0xfd, 0xae, 0x80, 0x8b, 0x27, 0x19, 0x3f, 0xa0, 0x2c, 0x80, 0x1f, 0xf6, 0x79, 0xaf,
- 0x8f, 0x58, 0xf3, 0x94, 0x85, 0xbe, 0xc7, 0xe3, 0x4d, 0x24, 0x49, 0x79, 0xee, 0x81, 0x33, 0x34,
- 0x20, 0x36, 0x6f, 0xc6, 0xbc, 0xba, 0xee, 0x9f, 0xa2, 0xeb, 0xc6, 0x9c, 0xe4, 0x3d, 0x73, 0x97,
- 0x33, 0xa0, 0x90, 0x48, 0xfb, 0xba, 0x70, 0xb2, 0xe3, 0x3c, 0x4e, 0xbc, 0x45, 0x7b, 0x42, 0xb8,
- 0x99, 0x74, 0xd1, 0xf8, 0x1a, 0xb7, 0xe3, 0x1d, 0x94, 0xd2, 0xe2, 0x0d, 0xd2, 0x93, 0xfd, 0x77,
- 0xc0, 0x1c, 0x72, 0x92, 0x47, 0x51, 0xeb, 0x0e, 0x1b, 0x64, 0xb4, 0x42, 0x31, 0x24, 0xec, 0x80,
- 0x79, 0x3b, 0x33, 0x78, 0xc9, 0x52, 0xb9, 0x36, 0x06, 0x49, 0x76, 0x72, 0x0b, 0x47, 0x9e, 0xac,
- 0x0c, 0xe5, 0x48, 0xe0, 0x2e, 0xa8, 0x1e, 0xca, 0x88, 0xb9, 0x4e, 0xd8, 0x35, 0xc3, 0x69, 0xa3,
- 0x64, 0xac, 0xf0, 0x41, 0x6d, 0x27, 0xbf, 0x79, 0xdc, 0x55, 0x2b, 0x79, 0x21, 0xea, 0xc7, 0xd0,
- 0x7e, 0x53, 0xc0, 0x85, 0xa1, 0x77, 0xf1, 0x1f, 0x64, 0x1f, 0xcd, 0x66, 0xdf, 0xcd, 0x53, 0xc9,
- 0xbe, 0xc1, 0x69, 0xf7, 0xfd, 0xd4, 0xdf, 0xb8, 0x2a, 0xf2, 0x0d, 0x83, 0x92, 0x17, 0xcd, 0x07,
- 0xd2, 0xd7, 0x2b, 0xe3, 0x26, 0x0f, 0xb7, 0x35, 0xe6, 0xf8, 0xf7, 0x3b, 0x5e, 0xa2, 0x04, 0x15,
- 0x7e, 0x06, 0x2a, 0xb6, 0x7c, 0x21, 0x70, 0x00, 0xea, 0x04, 0xd1, 0x14, 0xf4, 0x0f, 0x32, 0xe8,
- 0x6c, 0xaf, 0xab, 0x56, 0x36, 0x72, 0xb0, 0xa8, 0x8f, 0x08, 0xb6, 0x41, 0x39, 0xc9, 0x80, 0x68,
- 0x6c, 0x7e, 0xf3, 0x25, 0x42, 0xee, 0x3a, 0xc6, 0xff, 0x64, 0x8c, 0xcb, 0x89, 0x8c, 0xa1, 0x34,
- 0x3c, 0x7c, 0x00, 0xe6, 0xf6, 0x31, 0x6d, 0x77, 0x7c, 0x22, 0x07, 0xd2, 0x70, 0x82, 0x78, 0x95,
- 0x0f, 0x8b, 0xb7, 0xd3, 0x1b, 0xc7, 0x5d, 0xb5, 0x9a, 0x11, 0x88, 0x69, 0x21, 0x6b, 0x0c, 0x9f,
- 0x28, 0xa0, 0x82, 0xb3, 0xcf, 0x47, 0x56, 0x3b, 0x23, 0x3c, 0xb8, 0x3e, 0x86, 0x07, 0xb9, 0x17,
- 0xa8, 0x51, 0x93, 0x6e, 0x54, 0x72, 0x1b, 0x0c, 0xf5, 0xb1, 0xc1, 0xcf, 0xc1, 0x82, 0x9d, 0x79,
- 0xdd, 0xb1, 0xda, 0x94, 0x38, 0xc0, 0xd8, 0x57, 0x17, 0x23, 0x24, 0x2f, 0xd9, 0xac, 0x9c, 0xa1,
- 0x3c, 0x15, 0xb4, 0x40, 0xe9, 0x10, 0xfb, 0x14, 0xef, 0xf1, 0x87, 0xc6, 0xb4, 0xe0, 0xbd, 0x3c,
- 0xd6, 0xd5, 0x85, 0xb6, 0xc9, 0x7c, 0x19, 0x49, 0x18, 0x4a, 0x80, 0xb5, 0x1f, 0x27, 0x81, 0x7a,
- 0xc2, 0xa7, 0x1c, 0xde, 0x03, 0xd0, 0xdd, 0x63, 0xc4, 0x3f, 0x24, 0xd6, 0x9d, 0xf0, 0x8d, 0x1f,
- 0x4d, 0xd0, 0x85, 0x64, 0xbc, 0xda, 0xea, 0xd3, 0x40, 0x03, 0xac, 0xa0, 0x0d, 0x66, 0x83, 0xd4,
- 0xe4, 0x37, 0xce, 0x8b, 0x40, 0x3a, 0x96, 0x1e, 0x1c, 0x8d, 0x4a, 0xaf, 0xab, 0x66, 0x46, 0x49,
- 0x94, 0x81, 0x87, 0x26, 0x00, 0x66, 0x72, 0x7b, 0x61, 0x01, 0x34, 0x46, 0x6b, 0x67, 0xc9, 0x9d,
- 0xc5, 0x9f, 0xa0, 0xd4, 0x75, 0xa5, 0x60, 0xb5, 0x3f, 0x15, 0x00, 0x92, 0xaa, 0x80, 0x17, 0x41,
- 0xea, 0x19, 0x2f, 0xbf, 0x62, 0x45, 0x0e, 0x81, 0x52, 0x72, 0xb8, 0x02, 0xa6, 0x6d, 0xc2, 0x18,
- 0x6e, 0x46, 0xef, 0x80, 0xf8, 0x5f, 0x86, 0x8d, 0x50, 0x8c, 0xa2, 0x7d, 0xb8, 0x0b, 0xa6, 0x7c,
- 0x82, 0x99, 0xeb, 0xc8, 0xff, 0x23, 0xde, 0xe5, 0x63, 0x15, 0x12, 0x92, 0xe3, 0xae, 0xba, 0x3a,
- 0xca, 0xbf, 0x40, 0xba, 0x9c, 0xc2, 0x84, 0x11, 0x92, 0x70, 0xf0, 0x0e, 0xa8, 0x4a, 0x8e, 0xd4,
- 0x81, 0xc3, 0xaa, 0x3d, 0x2f, 0x4f, 0x53, 0xdd, 0xc8, 0x2b, 0xa0, 0x7e, 0x1b, 0xed, 0x1e, 0x98,
- 0x89, 0xb2, 0x0b, 0xd6, 0x40, 0x31, 0xf5, 0xf9, 0x0e, 0x1d, 0x17, 0x92, 0x5c, 0x60, 0x26, 0x07,
- 0x07, 0xc6, 0xd8, 0x7a, 0xfa, 0xa2, 0x3e, 0xf1, 0xec, 0x45, 0x7d, 0xe2, 0xf9, 0x8b, 0xfa, 0xc4,
- 0x93, 0x5e, 0x5d, 0x79, 0xda, 0xab, 0x2b, 0xcf, 0x7a, 0x75, 0xe5, 0x79, 0xaf, 0xae, 0xfc, 0xd2,
- 0xab, 0x2b, 0xdf, 0xfc, 0x5a, 0x9f, 0xf8, 0x60, 0x65, 0xe4, 0x7f, 0xf1, 0xfe, 0x0a, 0x00, 0x00,
- 0xff, 0xff, 0x22, 0xbd, 0xc5, 0xc7, 0xf1, 0x13, 0x00, 0x00,
+ // 1783 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x1b, 0x4b,
+ 0x15, 0xcf, 0xda, 0xce, 0x87, 0xc7, 0xf9, 0xf2, 0xd0, 0x12, 0x37, 0xa5, 0xde, 0x68, 0x55, 0xa1,
+ 0x46, 0x82, 0x35, 0x49, 0x0b, 0xa5, 0x55, 0x51, 0x95, 0x6d, 0x9b, 0xb6, 0x69, 0x9d, 0x44, 0x53,
+ 0x94, 0x20, 0x04, 0x12, 0x93, 0xf5, 0xc4, 0xde, 0xc6, 0xfb, 0xc1, 0xce, 0x3a, 0x34, 0x02, 0x89,
+ 0x4a, 0x08, 0x09, 0xde, 0x78, 0xe0, 0x85, 0x37, 0xc4, 0x1f, 0xc0, 0x03, 0xfc, 0x05, 0xbc, 0xf5,
+ 0xb1, 0x8f, 0xe5, 0x81, 0x15, 0x35, 0x20, 0xf1, 0x0c, 0xd2, 0xbd, 0x52, 0x5e, 0xee, 0xd5, 0xcc,
+ 0xce, 0x7e, 0x79, 0xed, 0xc6, 0x4e, 0xd3, 0xf4, 0xe1, 0xde, 0x37, 0xcf, 0xf9, 0xf8, 0x9d, 0x39,
+ 0x67, 0xce, 0x9c, 0x39, 0xc7, 0x0b, 0x6e, 0x1d, 0x7c, 0x97, 0xaa, 0x86, 0x5d, 0xc3, 0x8e, 0x51,
+ 0xc3, 0x0d, 0xd3, 0xa0, 0xd4, 0xb0, 0x2d, 0x97, 0x34, 0x0d, 0xea, 0xb9, 0xd8, 0x33, 0x6c, 0xab,
+ 0x76, 0xb8, 0x82, 0xdb, 0x4e, 0x0b, 0xaf, 0xd4, 0x9a, 0xc4, 0x22, 0x2e, 0xf6, 0x48, 0x43, 0x75,
+ 0x5c, 0xdb, 0xb3, 0xe1, 0x72, 0xa0, 0xaa, 0x62, 0xc7, 0x50, 0xfb, 0xaa, 0xaa, 0xa1, 0xea, 0xe2,
+ 0x37, 0x9b, 0x86, 0xd7, 0xea, 0xec, 0xa9, 0xba, 0x6d, 0xd6, 0x9a, 0x76, 0xd3, 0xae, 0x71, 0x84,
+ 0xbd, 0xce, 0x3e, 0x5f, 0xf1, 0x05, 0xff, 0x15, 0x20, 0x2f, 0x5e, 0x1f, 0x62, 0x53, 0xbd, 0xdb,
+ 0x59, 0xbc, 0x11, 0x2b, 0x99, 0x58, 0x6f, 0x19, 0x16, 0x71, 0x8f, 0x6a, 0xce, 0x41, 0x93, 0x11,
+ 0x68, 0xcd, 0x24, 0x1e, 0xee, 0xa7, 0x55, 0x1b, 0xa4, 0xe5, 0x76, 0x2c, 0xcf, 0x30, 0x49, 0x46,
+ 0xe1, 0x3b, 0x27, 0x29, 0x50, 0xbd, 0x45, 0x4c, 0xdc, 0xab, 0xa7, 0x3c, 0x02, 0x70, 0xcd, 0x71,
+ 0xda, 0x47, 0xf7, 0x6c, 0x6b, 0xdf, 0x68, 0x76, 0x02, 0x3f, 0xe0, 0x2a, 0x00, 0xe4, 0x85, 0xe3,
+ 0x12, 0xee, 0x61, 0x45, 0x5a, 0x92, 0xae, 0x15, 0x35, 0xf8, 0xca, 0x97, 0xc7, 0xba, 0xbe, 0x0c,
+ 0x1e, 0x44, 0x1c, 0x94, 0x90, 0x52, 0x28, 0x98, 0x5b, 0xeb, 0x34, 0x0c, 0x6f, 0xcd, 0xb2, 0x6c,
+ 0x2f, 0x80, 0xb9, 0x02, 0xf2, 0x07, 0xe4, 0x48, 0xe8, 0x97, 0x84, 0x7e, 0xfe, 0x09, 0x39, 0x42,
+ 0x8c, 0x0e, 0xd7, 0xc0, 0xdc, 0x21, 0x6e, 0x77, 0x48, 0x0c, 0x58, 0xc9, 0x71, 0xd1, 0x05, 0x21,
+ 0x3a, 0xb7, 0x93, 0x66, 0xa3, 0x5e, 0x79, 0xa5, 0x0d, 0xca, 0xf1, 0x6a, 0x17, 0xbb, 0x96, 0x61,
+ 0x35, 0xe1, 0x37, 0xc0, 0xd4, 0xbe, 0x41, 0xda, 0x0d, 0x44, 0xf6, 0x05, 0xe0, 0xbc, 0x00, 0x9c,
+ 0x5a, 0x17, 0x74, 0x14, 0x49, 0xc0, 0x65, 0x30, 0xf9, 0xb3, 0x40, 0xb1, 0x92, 0xe7, 0xc2, 0x73,
+ 0x42, 0x78, 0x52, 0xe0, 0xa1, 0x90, 0xaf, 0xdc, 0x05, 0xc5, 0x8d, 0x67, 0x5b, 0x9b, 0xdb, 0xd8,
+ 0xd3, 0x5b, 0xa7, 0x8a, 0xd1, 0x3e, 0x98, 0xad, 0x33, 0xe5, 0x7b, 0xb6, 0xd5, 0x30, 0x78, 0x88,
+ 0x96, 0x40, 0xc1, 0xc2, 0x26, 0x11, 0xfa, 0xd3, 0x42, 0xbf, 0xb0, 0x89, 0x4d, 0x82, 0x38, 0xa7,
+ 0xc7, 0x4e, 0x6e, 0x28, 0x3b, 0x7f, 0x2f, 0x08, 0x43, 0x88, 0x50, 0xbb, 0xe3, 0xea, 0x84, 0xc2,
+ 0x17, 0xa0, 0xcc, 0xe0, 0xa8, 0x83, 0x75, 0xf2, 0x8c, 0xb4, 0x89, 0xee, 0xd9, 0x2e, 0xb7, 0x5a,
+ 0x5a, 0xbd, 0xae, 0xc6, 0x57, 0x26, 0x4a, 0x1e, 0xd5, 0x39, 0x68, 0x32, 0x02, 0x55, 0x59, 0x8e,
+ 0xaa, 0x87, 0x2b, 0xea, 0x53, 0xbc, 0x47, 0xda, 0xa1, 0xaa, 0x76, 0xb1, 0xeb, 0xcb, 0xe5, 0xcd,
+ 0x5e, 0x44, 0x94, 0x35, 0x02, 0x6d, 0x30, 0x6b, 0xef, 0x3d, 0x27, 0xba, 0x17, 0x99, 0xcd, 0x9d,
+ 0xde, 0x2c, 0xec, 0xfa, 0xf2, 0xec, 0x56, 0x0a, 0x0e, 0xf5, 0xc0, 0xc3, 0x5f, 0x82, 0x19, 0x57,
+ 0xf8, 0x8d, 0x3a, 0x6d, 0x42, 0x2b, 0xf9, 0xa5, 0xfc, 0xb5, 0xd2, 0xaa, 0xa6, 0x0e, 0x5d, 0x19,
+ 0x54, 0xe6, 0x58, 0x83, 0x29, 0xef, 0x1a, 0x5e, 0x6b, 0xcb, 0x21, 0x01, 0x9f, 0x6a, 0x17, 0x45,
+ 0xe0, 0x67, 0x50, 0xd2, 0x00, 0x4a, 0xdb, 0x83, 0xbf, 0x97, 0xc0, 0x05, 0xf2, 0x42, 0x6f, 0x77,
+ 0x1a, 0x24, 0x25, 0x57, 0x29, 0x9c, 0xd9, 0x46, 0xbe, 0x26, 0x36, 0x72, 0xe1, 0x41, 0x1f, 0x3b,
+ 0xa8, 0xaf, 0x75, 0x78, 0x1f, 0x94, 0x4c, 0x96, 0x14, 0xdb, 0x76, 0xdb, 0xd0, 0x8f, 0x2a, 0x93,
+ 0x3c, 0x95, 0x94, 0xae, 0x2f, 0x97, 0xea, 0x31, 0xf9, 0xd8, 0x97, 0xe7, 0x12, 0xcb, 0xef, 0x1f,
+ 0x39, 0x04, 0x25, 0xd5, 0x94, 0xff, 0x48, 0x60, 0xa1, 0xde, 0x61, 0x37, 0xdc, 0x6a, 0xae, 0x85,
+ 0x9b, 0x0f, 0x78, 0xf0, 0x27, 0x60, 0x8a, 0x1d, 0x5b, 0x03, 0x7b, 0x58, 0xe4, 0xd6, 0xb7, 0x86,
+ 0x3b, 0xe4, 0xe0, 0x44, 0xeb, 0xc4, 0xc3, 0x71, 0x6e, 0xc7, 0x34, 0x14, 0xa1, 0xc2, 0x16, 0x28,
+ 0x50, 0x87, 0xe8, 0x22, 0x85, 0xd6, 0x47, 0x88, 0xe4, 0x80, 0x3d, 0x3f, 0x73, 0x88, 0x1e, 0xdf,
+ 0x3b, 0xb6, 0x42, 0xdc, 0x82, 0xf2, 0x7f, 0x09, 0x54, 0x07, 0xe8, 0x68, 0x86, 0xd5, 0x60, 0x85,
+ 0xe6, 0xc3, 0xbb, 0x6b, 0xa7, 0xdc, 0xad, 0xbf, 0xbf, 0xbb, 0x62, 0xeb, 0x03, 0xbd, 0xfe, 0x9f,
+ 0x04, 0x94, 0x77, 0xab, 0x3e, 0x35, 0xa8, 0x07, 0x7f, 0x94, 0xf1, 0x5c, 0x1d, 0xf2, 0x36, 0x1b,
+ 0x34, 0xf0, 0x3b, 0x2a, 0xc9, 0x21, 0x25, 0xe1, 0xb5, 0x05, 0xc6, 0x0d, 0x8f, 0x98, 0xb4, 0x92,
+ 0xe3, 0xf7, 0xe5, 0xf1, 0x99, 0xb9, 0xad, 0xcd, 0x08, 0xab, 0xe3, 0x8f, 0x19, 0x3e, 0x0a, 0xcc,
+ 0x28, 0x7f, 0xce, 0x9d, 0xe4, 0x34, 0x8b, 0x10, 0xab, 0xc4, 0x0e, 0x27, 0x6e, 0xc6, 0x15, 0x3b,
+ 0x3a, 0xbe, 0xed, 0x88, 0x83, 0x12, 0x52, 0xf0, 0xc7, 0x60, 0xca, 0xc1, 0x2e, 0x36, 0xc3, 0xb7,
+ 0x28, 0x5d, 0xf6, 0x4e, 0xf2, 0x66, 0x5b, 0xa8, 0x6a, 0xd3, 0x2c, 0x52, 0xe1, 0x0a, 0x45, 0x90,
+ 0xb0, 0x03, 0x66, 0xcd, 0x54, 0x9d, 0xe7, 0x6f, 0x58, 0x69, 0xf5, 0xd6, 0x28, 0x21, 0x4b, 0x01,
+ 0x04, 0x15, 0x36, 0x4d, 0x43, 0x3d, 0x46, 0x94, 0x7f, 0x4b, 0xe0, 0xf2, 0x80, 0x80, 0x9d, 0x43,
+ 0x7a, 0x34, 0xd3, 0xe9, 0xa1, 0x9d, 0x41, 0x7a, 0xf4, 0xcf, 0x8b, 0x3f, 0x4e, 0x0c, 0x74, 0x93,
+ 0x27, 0x04, 0x06, 0x45, 0x7e, 0x12, 0x4f, 0x0c, 0xab, 0x21, 0xfc, 0xbc, 0x31, 0xea, 0xe9, 0x32,
+ 0x5d, 0x6d, 0xa6, 0xeb, 0xcb, 0xc5, 0x68, 0x89, 0x62, 0x54, 0xf8, 0x73, 0x30, 0x6f, 0x8a, 0x8e,
+ 0x81, 0x01, 0x18, 0x96, 0x47, 0x45, 0x1e, 0xbd, 0xc7, 0x11, 0x5f, 0xe8, 0xfa, 0xf2, 0x7c, 0xbd,
+ 0x07, 0x16, 0x65, 0x0c, 0xc1, 0x06, 0x28, 0x1e, 0x62, 0xd7, 0xc0, 0x7b, 0xf1, 0x23, 0x3a, 0x4a,
+ 0xf6, 0xee, 0x08, 0x5d, 0xad, 0x2c, 0xa2, 0x5b, 0x0c, 0x29, 0x14, 0xc5, 0xc0, 0xcc, 0x8a, 0xd9,
+ 0x09, 0x3a, 0xc6, 0xf0, 0x85, 0xbc, 0x3e, 0xf2, 0x91, 0xda, 0x56, 0x6c, 0x25, 0xa4, 0x50, 0x14,
+ 0x03, 0xc3, 0xa7, 0x60, 0x66, 0x1f, 0x1b, 0xed, 0x8e, 0x4b, 0xc4, 0xf3, 0x37, 0xce, 0xef, 0xef,
+ 0xd7, 0xd9, 0x63, 0xbe, 0x9e, 0x64, 0x1c, 0xfb, 0x72, 0x39, 0x45, 0xe0, 0x4f, 0x60, 0x5a, 0x19,
+ 0xfe, 0x02, 0xcc, 0x99, 0xa9, 0x46, 0x8e, 0x56, 0x26, 0xf8, 0xce, 0x47, 0x3e, 0x95, 0x08, 0x21,
+ 0xee, 0x7a, 0xd3, 0x74, 0x8a, 0x7a, 0x4d, 0xc1, 0xdf, 0x48, 0x00, 0xba, 0xc4, 0xb0, 0x0e, 0x6d,
+ 0x9d, 0x43, 0xa6, 0x1e, 0xf4, 0x1f, 0x08, 0x18, 0x88, 0x32, 0x12, 0xc7, 0xbe, 0x7c, 0x7b, 0x88,
+ 0x19, 0x46, 0xcd, 0x6a, 0xf2, 0x18, 0xf4, 0xb1, 0xa9, 0xfc, 0x35, 0x07, 0xa6, 0xc2, 0x78, 0xc3,
+ 0x3b, 0xec, 0x3e, 0x78, 0x7a, 0x8b, 0x49, 0x8b, 0x4e, 0xb5, 0x1a, 0x1e, 0xca, 0x76, 0xc8, 0x38,
+ 0x4e, 0x2e, 0x50, 0xac, 0x00, 0x7f, 0x2d, 0x01, 0x88, 0x33, 0xb3, 0x88, 0x28, 0x68, 0xdf, 0x1b,
+ 0x21, 0xae, 0xd9, 0x81, 0x46, 0xfb, 0x2a, 0x0b, 0x48, 0x96, 0x8e, 0xfa, 0x18, 0x64, 0xb7, 0xfa,
+ 0x39, 0xb5, 0x2d, 0xbe, 0xc7, 0x4a, 0x61, 0xe4, 0x5b, 0x1d, 0x4d, 0x08, 0xc1, 0xad, 0x8e, 0x96,
+ 0x28, 0x46, 0x55, 0xde, 0x48, 0x60, 0x61, 0x40, 0x67, 0x07, 0x6f, 0xc6, 0xdd, 0x2b, 0x6f, 0xaf,
+ 0x2b, 0xd2, 0x52, 0xfe, 0x5a, 0x51, 0x2b, 0x27, 0xbb, 0x4e, 0xce, 0x40, 0x69, 0x39, 0xf8, 0x2b,
+ 0x96, 0x15, 0x19, 0x3c, 0x51, 0x2d, 0x6e, 0x0e, 0xe3, 0x81, 0xda, 0xa7, 0xd1, 0x5c, 0x8c, 0xd2,
+ 0x29, 0xc3, 0x43, 0x7d, 0xcc, 0x29, 0x18, 0xc4, 0x85, 0x8c, 0xbd, 0x98, 0xd8, 0x31, 0x76, 0x88,
+ 0xdb, 0x6f, 0x46, 0x5a, 0xdb, 0x7e, 0x2c, 0x38, 0x28, 0x21, 0xc5, 0x26, 0xa2, 0x03, 0x56, 0x4f,
+ 0x73, 0xe9, 0x89, 0x88, 0x17, 0x46, 0xce, 0x51, 0xfe, 0x92, 0x03, 0xd1, 0x5b, 0x38, 0xc4, 0x00,
+ 0x55, 0x03, 0xc5, 0x68, 0x28, 0x11, 0xa8, 0x51, 0xa9, 0x88, 0x06, 0x18, 0x14, 0xcb, 0xb0, 0x37,
+ 0x9b, 0x86, 0xa3, 0x4a, 0xfe, 0xf4, 0xa3, 0x0a, 0x7f, 0xb3, 0xa3, 0x21, 0x25, 0x82, 0x84, 0x1e,
+ 0x58, 0xe0, 0xf5, 0x9d, 0x78, 0xc4, 0xdd, 0xb4, 0xbd, 0x75, 0xbb, 0x63, 0x35, 0xd6, 0x74, 0x9e,
+ 0xeb, 0x05, 0xbe, 0xbb, 0xdb, 0x5d, 0x5f, 0x5e, 0xd8, 0xee, 0x2f, 0x72, 0xec, 0xcb, 0x97, 0x07,
+ 0xb0, 0xf8, 0x7d, 0x1a, 0x04, 0xad, 0xfc, 0x41, 0x02, 0xd3, 0x4c, 0xe2, 0x5e, 0x8b, 0xe8, 0x07,
+ 0xac, 0x79, 0x65, 0x45, 0x84, 0xf4, 0xce, 0xce, 0x41, 0xb6, 0x95, 0x56, 0xef, 0x8c, 0x90, 0xf0,
+ 0x99, 0x01, 0x3c, 0xce, 0x99, 0x0c, 0x8b, 0xa2, 0x3e, 0x36, 0x95, 0x7f, 0xe4, 0xc0, 0xa5, 0x1d,
+ 0xdc, 0x36, 0x1a, 0x1f, 0x69, 0xa8, 0x78, 0x9e, 0xea, 0xb2, 0x1f, 0x8d, 0xf4, 0xc4, 0x0d, 0xd8,
+ 0xf5, 0xa0, 0x06, 0x1b, 0xba, 0x60, 0x82, 0x7a, 0xd8, 0xeb, 0x84, 0x9d, 0xda, 0xc6, 0x99, 0x58,
+ 0xe3, 0x88, 0xda, 0xac, 0xb0, 0x37, 0x11, 0xac, 0x91, 0xb0, 0xa4, 0x7c, 0x2a, 0x81, 0xa5, 0x81,
+ 0xba, 0xe7, 0x37, 0xcc, 0xfc, 0x34, 0x15, 0xe6, 0xad, 0xb3, 0x70, 0xfc, 0xa4, 0x71, 0xe6, 0x13,
+ 0x09, 0x5c, 0x3d, 0x49, 0xf9, 0x1c, 0x3a, 0x56, 0x27, 0xdd, 0xb1, 0x3e, 0x39, 0x43, 0xd7, 0x07,
+ 0xb4, 0xae, 0xbf, 0xcd, 0x9f, 0xec, 0xf8, 0x97, 0x43, 0x4d, 0xea, 0x1f, 0xb2, 0x5d, 0x50, 0x3e,
+ 0x14, 0x11, 0xb3, 0xad, 0xa0, 0x6a, 0x06, 0xfd, 0x68, 0x51, 0x5b, 0xee, 0xfa, 0x72, 0x79, 0xa7,
+ 0x97, 0x79, 0xec, 0xcb, 0xf3, 0xbd, 0x44, 0x94, 0xc5, 0x50, 0xfe, 0x2b, 0x81, 0x2b, 0x03, 0xcf,
+ 0xe2, 0x1c, 0xb2, 0xcf, 0x48, 0x67, 0xdf, 0xfd, 0x33, 0xc9, 0xbe, 0xfe, 0x69, 0xf7, 0xa7, 0x89,
+ 0x77, 0xb8, 0xfa, 0x85, 0x98, 0x99, 0xda, 0xa0, 0x14, 0x67, 0x40, 0x38, 0x35, 0x7d, 0xfb, 0x14,
+ 0x21, 0xb7, 0x2d, 0xed, 0x2b, 0x22, 0xc6, 0xa5, 0x98, 0x46, 0x51, 0x12, 0x3e, 0x3b, 0xd5, 0x14,
+ 0xde, 0x67, 0xaa, 0x79, 0x29, 0x81, 0x79, 0x9c, 0xfe, 0x0f, 0x9f, 0x56, 0xc6, 0xb9, 0x07, 0xb7,
+ 0x47, 0xe9, 0xbf, 0xd3, 0x10, 0x5a, 0x45, 0xb8, 0x31, 0xdf, 0xc3, 0xa0, 0x28, 0x63, 0xed, 0x23,
+ 0x0f, 0x56, 0xa9, 0x81, 0x77, 0xf2, 0x03, 0x0d, 0xbc, 0xca, 0xdf, 0x72, 0x40, 0x3e, 0xe1, 0x29,
+ 0x87, 0x1b, 0x00, 0xda, 0x7b, 0x94, 0xb8, 0x87, 0xa4, 0xf1, 0x30, 0xf8, 0x64, 0x13, 0x76, 0xd0,
+ 0xf9, 0xb8, 0xbd, 0xda, 0xca, 0x48, 0xa0, 0x3e, 0x5a, 0xd0, 0x04, 0xd3, 0x5e, 0xa2, 0xf3, 0x1b,
+ 0x65, 0x22, 0x10, 0x8e, 0x25, 0x1b, 0x47, 0x6d, 0xbe, 0xeb, 0xcb, 0xa9, 0x56, 0x12, 0xa5, 0xe0,
+ 0xa1, 0x0e, 0x80, 0x1e, 0x9f, 0x5e, 0x70, 0x01, 0x6a, 0xc3, 0x95, 0xb3, 0xf8, 0xcc, 0xa2, 0x27,
+ 0x28, 0x71, 0x5c, 0x09, 0x58, 0xe5, 0x33, 0x09, 0x80, 0xf8, 0x56, 0xc0, 0xab, 0x20, 0xf1, 0x29,
+ 0x44, 0xbc, 0x62, 0x05, 0x06, 0x81, 0x12, 0x74, 0xb8, 0x0c, 0x26, 0x4d, 0x42, 0x29, 0x6e, 0x86,
+ 0x73, 0x40, 0xf4, 0xa9, 0xa7, 0x1e, 0x90, 0x51, 0xc8, 0x87, 0xbb, 0x60, 0xc2, 0x25, 0x98, 0x8a,
+ 0xf9, 0xb3, 0xa8, 0xdd, 0x65, 0x6d, 0x15, 0xe2, 0x94, 0x63, 0x5f, 0x5e, 0x19, 0xe6, 0xa3, 0x9e,
+ 0x2a, 0xba, 0x30, 0xae, 0x84, 0x04, 0x1c, 0x7c, 0x08, 0xca, 0xc2, 0x46, 0x62, 0xc3, 0xc1, 0xad,
+ 0xbd, 0x24, 0x76, 0x53, 0xae, 0xf7, 0x0a, 0xa0, 0xac, 0x8e, 0xb2, 0x01, 0xa6, 0xc2, 0xec, 0x82,
+ 0x15, 0x50, 0x48, 0x3c, 0xdf, 0x81, 0xe3, 0x9c, 0xd2, 0x13, 0x98, 0x5c, 0xff, 0xc0, 0x68, 0x5b,
+ 0xaf, 0xde, 0x56, 0xc7, 0x5e, 0xbf, 0xad, 0x8e, 0xbd, 0x79, 0x5b, 0x1d, 0x7b, 0xd9, 0xad, 0x4a,
+ 0xaf, 0xba, 0x55, 0xe9, 0x75, 0xb7, 0x2a, 0xbd, 0xe9, 0x56, 0xa5, 0x7f, 0x76, 0xab, 0xd2, 0xef,
+ 0xfe, 0x55, 0x1d, 0xfb, 0xe1, 0xf2, 0xd0, 0x1f, 0x65, 0x3f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xac,
+ 0xc8, 0x8c, 0x78, 0xc0, 0x1d, 0x00, 0x00,
+}
+
+func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
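As a reading aid for the MarshalToSizedBuffer bodies in this file: each literal assignment such as dAtA[i] = 0xa writes a protobuf field key, computed as (fieldNumber << 3) | wireType, so 0x0a is field 1 with wire type 2 (length-delimited), 0x12 is field 2, 0x1a is field 3, and 0x22 is field 4; this single-byte form holds for field numbers up to 15. A tiny self-contained check of that arithmetic (not part of the vendored code):

package main

import "fmt"

// tag returns the one-byte protobuf field key for small field numbers,
// matching the literals (0x0a, 0x12, 0x1a, 0x22, ...) in the generated code.
func tag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	const lengthDelimited = 2
	fmt.Printf("field 1: %#x\n", tag(1, lengthDelimited)) // 0xa, e.g. ApplyConfiguration.Expression
	fmt.Printf("field 2: %#x\n", tag(2, lengthDelimited)) // 0x12
	fmt.Printf("field 4: %#x\n", tag(4, lengthDelimited)) // 0x22
}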
@@ -709,6 +1017,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *JSONPatch) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -824,7 +1160,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -834,18 +1170,18 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
- size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -854,19 +1190,20 @@ func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error)
}
i--
dAtA[i] = 0x12
- if len(m.ResourceNames) > 0 {
- for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ResourceNames[iNdEx])
- copy(dAtA[i:], m.ResourceNames[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
- i--
- dAtA[i] = 0xa
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ParamKind) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -876,187 +1213,12 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.Kind)
- copy(dAtA[i:], m.Kind)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
- i--
- dAtA[i] = 0x12
- i -= len(m.APIVersion)
- copy(dAtA[i:], m.APIVersion)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ParamRef) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ParameterNotFoundAction != nil {
- i -= len(*m.ParameterNotFoundAction)
- copy(dAtA[i:], *m.ParameterNotFoundAction)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
- i--
- dAtA[i] = 0x22
- }
- if m.Selector != nil {
- {
- size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- i -= len(m.Namespace)
- copy(dAtA[i:], m.Namespace)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ExpressionWarnings) > 0 {
- for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1084,7 +1246,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1094,12 +1256,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1131,7 +1293,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte)
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1141,25 +1303,16 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.ValidationActions) > 0 {
- for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ValidationActions[iNdEx])
- copy(dAtA[i:], m.ValidationActions[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
if m.MatchResources != nil {
{
size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
@@ -1192,7 +1345,7 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1202,12 +1355,12 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -1239,7 +1392,7 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1249,30 +1402,21 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Variables) > 0 {
- for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
+ i -= len(m.ReinvocationPolicy)
+ copy(dAtA[i:], m.ReinvocationPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy)))
+ i--
+ dAtA[i] = 0x3a
if len(m.MatchConditions) > 0 {
for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -1287,10 +1431,17 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
dAtA[i] = 0x32
}
}
- if len(m.AuditAnnotations) > 0 {
- for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Mutations) > 0 {
+ for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1298,20 +1449,13 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x2a
+ dAtA[i] = 0x22
}
}
- if m.FailurePolicy != nil {
- i -= len(*m.FailurePolicy)
- copy(dAtA[i:], *m.FailurePolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Validations) > 0 {
- for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1349,7 +1493,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+func (m *Mutation) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1359,33 +1503,31 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
+ if m.JSONPatch != nil {
+ {
+ size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
- i--
- dAtA[i] = 0x1a
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x22
}
- if m.TypeChecking != nil {
+ if m.ApplyConfiguration != nil {
{
- size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -1393,15 +1535,17 @@ func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x1a
}
- i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i -= len(m.PatchType)
+ copy(dAtA[i:], m.PatchType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType)))
i--
- dAtA[i] = 0x8
+ dAtA[i] = 0x12
return len(dAtA) - i, nil
}
-func (m *Validation) Marshal() (dAtA []byte, err error) {
+func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1411,42 +1555,72 @@ func (m *Validation) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.MessageExpression)
- copy(dAtA[i:], m.MessageExpression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
+ {
+ size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x22
- if m.Reason != nil {
- i -= len(*m.Reason)
- copy(dAtA[i:], *m.Reason)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
- i--
- dAtA[i] = 0x1a
+ dAtA[i] = 0x12
+ if len(m.ResourceNames) > 0 {
+ for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ResourceNames[iNdEx])
+ copy(dAtA[i:], m.ResourceNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
}
- i -= len(m.Message)
- copy(dAtA[i:], m.Message)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ return len(dAtA) - i, nil
+}
+
+func (m *ParamKind) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i--
dAtA[i] = 0x12
- i -= len(m.Expression)
- copy(dAtA[i:], m.Expression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i -= len(m.APIVersion)
+ copy(dAtA[i:], m.APIVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *Variable) Marshal() (dAtA []byte, err error) {
+func (m *ParamRef) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1456,19 +1630,38 @@ func (m *Variable) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- i -= len(m.Expression)
- copy(dAtA[i:], m.Expression)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ if m.ParameterNotFoundAction != nil {
+ i -= len(*m.ParameterNotFoundAction)
+ copy(dAtA[i:], *m.ParameterNotFoundAction)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
i--
dAtA[i] = 0x12
i -= len(m.Name)
@@ -1479,606 +1672,2773 @@ func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *AuditAnnotation) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ValueExpression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+
+func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ExpressionWarning) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.FieldRef)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Warning)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ if len(m.ExpressionWarnings) > 0 {
+ for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
}
-func (m *MatchCondition) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *MatchResources) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.NamespaceSelector != nil {
- l = m.NamespaceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ObjectSelector != nil {
- l = m.ObjectSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.ResourceRules) > 0 {
- for _, e := range m.ResourceRules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if len(m.ExcludeResourceRules) > 0 {
- for _, e := range m.ExcludeResourceRules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if m.MatchPolicy != nil {
- l = len(*m.MatchPolicy)
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *NamedRuleWithOperations) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceNames) > 0 {
- for _, s := range m.ResourceNames {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
+func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- l = m.RuleWithOperations.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *ParamKind) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.APIVersion)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Kind)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ParamRef) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Selector != nil {
- l = m.Selector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ParameterNotFoundAction != nil {
- l = len(*m.ParameterNotFoundAction)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *TypeChecking) Size() (n int) {
- if m == nil {
- return 0
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- var l int
- _ = l
- if len(m.ExpressionWarnings) > 0 {
- for _, e := range m.ExpressionWarnings {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicy) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- return n
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.PolicyName)
- n += 1 + l + sovGenerated(uint64(l))
- if m.ParamRef != nil {
- l = m.ParamRef.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ValidationActions) > 0 {
+ for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ValidationActions[iNdEx])
+ copy(dAtA[i:], m.ValidationActions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
}
if m.MatchResources != nil {
- l = m.MatchResources.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
- if len(m.ValidationActions) > 0 {
- for _, s := range m.ValidationActions {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamRef != nil {
+ {
+ size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
}
- return n
+ i -= len(m.PolicyName)
+ copy(dAtA[i:], m.PolicyName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyList) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- return n
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- if m.ParamKind != nil {
- l = m.ParamKind.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
}
- if m.MatchConstraints != nil {
- l = m.MatchConstraints.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
}
- if len(m.Validations) > 0 {
- for _, e := range m.Validations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AuditAnnotations) > 0 {
+ for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
}
}
if m.FailurePolicy != nil {
- l = len(*m.FailurePolicy)
- n += 1 + l + sovGenerated(uint64(l))
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
}
- if len(m.AuditAnnotations) > 0 {
- for _, e := range m.AuditAnnotations {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Validations) > 0 {
+ for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- if len(m.MatchConditions) > 0 {
- for _, e := range m.MatchConditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.MatchConstraints != nil {
+ {
+ size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
}
- if len(m.Variables) > 0 {
- for _, e := range m.Variables {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamKind != nil {
+ {
+ size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- n += 1 + sovGenerated(uint64(m.ObservedGeneration))
- if m.TypeChecking != nil {
- l = m.TypeChecking.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- return n
+ if m.TypeChecking != nil {
+ {
+ size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
}
-func (m *Validation) Size() (n int) {
- if m == nil {
- return 0
+func (m *Validation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Message)
- n += 1 + l + sovGenerated(uint64(l))
+ i -= len(m.MessageExpression)
+ copy(dAtA[i:], m.MessageExpression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
+ i--
+ dAtA[i] = 0x22
if m.Reason != nil {
- l = len(*m.Reason)
- n += 1 + l + sovGenerated(uint64(l))
+ i -= len(*m.Reason)
+ copy(dAtA[i:], *m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+ i--
+ dAtA[i] = 0x1a
}
- l = len(m.MessageExpression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *Variable) Size() (n int) {
- if m == nil {
- return 0
+func (m *Variable) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *AuditAnnotation) String() string {
- if this == nil {
- return "nil"
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
}
- s := strings.Join([]string{`&AuditAnnotation{`,
- `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
- `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
- `}`,
- }, "")
- return s
+ dAtA[offset] = uint8(v)
+ return base
}
-func (this *ExpressionWarning) String() string {
- if this == nil {
- return "nil"
+func (m *ApplyConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ExpressionWarning{`,
- `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
- `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *MatchCondition) String() string {
- if this == nil {
- return "nil"
+
+func (m *AuditAnnotation) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&MatchCondition{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ValueExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *MatchResources) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
- for _, f := range this.ResourceRules {
- repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
- }
- repeatedStringForResourceRules += "}"
- repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
- for _, f := range this.ExcludeResourceRules {
- repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+
+func (m *ExpressionWarning) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForExcludeResourceRules += "}"
- s := strings.Join([]string{`&MatchResources{`,
- `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ResourceRules:` + repeatedStringForResourceRules + `,`,
- `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
- `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.FieldRef)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Warning)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *NamedRuleWithOperations) String() string {
- if this == nil {
- return "nil"
+
+func (m *JSONPatch) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&NamedRuleWithOperations{`,
- `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
- `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ParamKind) String() string {
- if this == nil {
- return "nil"
+
+func (m *MatchCondition) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ParamKind{`,
- `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
- `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ParamRef) String() string {
- if this == nil {
- return "nil"
+
+func (m *MatchResources) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ParamRef{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
- `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *TypeChecking) String() string {
- if this == nil {
- return "nil"
+ var l int
+ _ = l
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
- for _, f := range this.ExpressionWarnings {
- repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForExpressionWarnings += "}"
- s := strings.Join([]string{`&TypeChecking{`,
- `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
- `}`,
- }, "")
- return s
+ if len(m.ResourceRules) > 0 {
+ for _, e := range m.ResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ExcludeResourceRules) > 0 {
+ for _, e := range m.ExcludeResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
}
-func (this *ValidatingAdmissionPolicy) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicy) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ValidatingAdmissionPolicyBinding) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicyBinding) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ValidatingAdmissionPolicyBindingList) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicyBindingList) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
+ return n
}
-func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) {
+ if m == nil {
+ return 0
}
- s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
- `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
- `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
- `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
- `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
- `}`,
- }, "")
- return s
+ var l int
+ _ = l
+ l = len(m.PolicyName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamRef != nil {
+ l = m.ParamRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MatchResources != nil {
+ l = m.MatchResources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
}
-func (this *ValidatingAdmissionPolicyList) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
+ return n
}
-func (this *ValidatingAdmissionPolicySpec) String() string {
- if this == nil {
- return "nil"
+
+func (m *MutatingAdmissionPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForValidations := "[]Validation{"
- for _, f := range this.Validations {
- repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+ var l int
+ _ = l
+ if m.ParamKind != nil {
+ l = m.ParamKind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForValidations += "}"
- repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
- for _, f := range this.AuditAnnotations {
- repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+ if m.MatchConstraints != nil {
+ l = m.MatchConstraints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
}
- repeatedStringForAuditAnnotations += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForMatchConditions += "}"
- repeatedStringForVariables := "[]Variable{"
- for _, f := range this.Variables {
- repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ if len(m.Mutations) > 0 {
+ for _, e := range m.Mutations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
}
- repeatedStringForVariables += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
- `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
- `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
- `Validations:` + repeatedStringForValidations + `,`,
- `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
- `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
- `MatchConditions:` + repeatedStringForMatchConditions + `,`,
- `Variables:` + repeatedStringForVariables + `,`,
- `}`,
- }, "")
- return s
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ReinvocationPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *ValidatingAdmissionPolicyStatus) String() string {
- if this == nil {
- return "nil"
+
+func (m *Mutation) Size() (n int) {
+ if m == nil {
+ return 0
}
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ var l int
+ _ = l
+ l = len(m.PatchType)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ApplyConfiguration != nil {
+ l = m.ApplyConfiguration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.JSONPatch != nil {
+ l = m.JSONPatch.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NamedRuleWithOperations) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RuleWithOperations.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ParamKind) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ParamRef) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ParameterNotFoundAction != nil {
+ l = len(*m.ParameterNotFoundAction)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *TypeChecking) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExpressionWarnings) > 0 {
+ for _, e := range m.ExpressionWarnings {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PolicyName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamRef != nil {
+ l = m.ParamRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MatchResources != nil {
+ l = m.MatchResources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ValidationActions) > 0 {
+ for _, s := range m.ValidationActions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ParamKind != nil {
+ l = m.ParamKind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MatchConstraints != nil {
+ l = m.MatchConstraints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Validations) > 0 {
+ for _, e := range m.Validations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.AuditAnnotations) > 0 {
+ for _, e := range m.AuditAnnotations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.TypeChecking != nil {
+ l = m.TypeChecking.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Validation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Reason != nil {
+ l = len(*m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.MessageExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Variable) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ApplyConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ApplyConfiguration{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AuditAnnotation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditAnnotation{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExpressionWarning) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExpressionWarning{`,
+ `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
+ `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *JSONPatch) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&JSONPatch{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MatchCondition{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchResources) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ResourceRules {
+ repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResourceRules += "}"
+ repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ExcludeResourceRules {
+ repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExcludeResourceRules += "}"
+ s := strings.Join([]string{`&MatchResources{`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ResourceRules:` + repeatedStringForResourceRules + `,`,
+ `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MutatingAdmissionPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicyBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicyBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicyBindingSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`,
+ `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+ `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+ `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]MutatingAdmissionPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&MutatingAdmissionPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingAdmissionPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
+ repeatedStringForMutations := "[]Mutation{"
+ for _, f := range this.Mutations {
+ repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMutations += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`,
+ `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+ `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
+ `Mutations:` + repeatedStringForMutations + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Mutation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Mutation{`,
+ `PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`,
+ `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`,
+ `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedRuleWithOperations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedRuleWithOperations{`,
+ `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+ `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamKind) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamKind{`,
+ `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamRef) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamRef{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TypeChecking) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
+ for _, f := range this.ExpressionWarnings {
+ repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExpressionWarnings += "}"
+ s := strings.Join([]string{`&TypeChecking{`,
+ `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
+ `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+ `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+ `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForValidations := "[]Validation{"
+ for _, f := range this.Validations {
+ repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForValidations += "}"
+ repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
+ for _, f := range this.AuditAnnotations {
+ repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAuditAnnotations += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
+ `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+ `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `Validations:` + repeatedStringForValidations + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Validation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Validation{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Reason:` + valueToStringGenerated(this.Reason) + `,`,
+ `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Variable) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Variable{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ValueExpression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldRef = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warning = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JSONPatch) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchResources) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectSelector == nil {
+ m.ObjectSelector = &v1.LabelSelector{}
+ }
+ if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
+ if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
+ if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := MatchPolicyType(dAtA[iNdEx:postIndex])
+ m.MatchPolicy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, MutatingAdmissionPolicyBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PolicyName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ParamRef == nil {
+ m.ParamRef = &ParamRef{}
+ }
+ if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MatchResources == nil {
+ m.MatchResources = &MatchResources{}
+ }
+ if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
- `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
- `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Validation) String() string {
- if this == nil {
- return "nil"
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- s := strings.Join([]string{`&Validation{`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
- `Reason:` + valueToStringGenerated(this.Reason) + `,`,
- `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
- `}`,
- }, "")
- return s
+ return nil
}
-func (this *Variable) String() string {
- if this == nil {
- return "nil"
+func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, MutatingAdmissionPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- s := strings.Join([]string{`&Variable{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+ return nil
}
-func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2101,17 +4461,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+ return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2121,29 +4481,69 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Key = string(dAtA[iNdEx:postIndex])
+ if m.ParamKind == nil {
+ m.ParamKind = &ParamKind{}
+ }
+ if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MatchConstraints == nil {
+ m.MatchConstraints = &MatchResources{}
+ }
+ if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2153,79 +4553,31 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ValueExpression = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
+ m.Variables = append(m.Variables, Variable{})
+ if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
+ iNdEx = postIndex
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2235,27 +4587,29 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.FieldRef = string(dAtA[iNdEx:postIndex])
+ m.Mutations = append(m.Mutations, Mutation{})
+ if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 3:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -2283,63 +4637,14 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Warning = string(dAtA[iNdEx:postIndex])
+ s := FailurePolicyType(dAtA[iNdEx:postIndex])
+ m.FailurePolicy = &s
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MatchCondition) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2349,27 +4654,29 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+ if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -2397,7 +4704,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Expression = string(dAtA[iNdEx:postIndex])
+ m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -2420,7 +4727,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MatchResources) Unmarshal(dAtA []byte) error {
+func (m *Mutation) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2443,53 +4750,17 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+ return fmt.Errorf("proto: Mutation: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.NamespaceSelector == nil {
- m.NamespaceSelector = &v1.LabelSelector{}
- }
- if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2499,31 +4770,27 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ObjectSelector == nil {
- m.ObjectSelector = &v1.LabelSelector{}
- }
- if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.PatchType = PatchType(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2550,14 +4817,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
- if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.ApplyConfiguration == nil {
+ m.ApplyConfiguration = &ApplyConfiguration{}
+ }
+ if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2584,43 +4853,12 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
- if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+ if m.JSONPatch == nil {
+ m.JSONPatch = &JSONPatch{}
}
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- s := MatchPolicyType(dAtA[iNdEx:postIndex])
- m.MatchPolicy = &s
iNdEx = postIndex
default:
iNdEx = preIndex
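
For orientation only (not part of this change): every generated Unmarshal method above repeats the same gogo/protobuf wire-format loop — read a varint tag, split it into field number and wire type, then either decode the field or skip it. Below is a minimal standalone Go sketch of that loop; the helper name and sample bytes are illustrative and are not part of the vendored package.

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns the
// decoded value plus the index just past it — the same inline loop the
// generated Unmarshal methods repeat (7 payload bits per byte, 0x80 as the
// continuation bit, with a 64-bit overflow guard).
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, i, errors.New("varint overflows 64 bits")
		}
		if i >= len(data) {
			return 0, i, errors.New("unexpected end of input")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Field 1, wire type 2 (length-delimited), payload "abc" — the shape used
	// by every string field (Expression, Key, Name, ...) decoded above.
	msg := []byte{0x0A, 0x03, 'a', 'b', 'c'}

	tag, i, err := readVarint(msg, 0)
	if err != nil {
		panic(err)
	}
	fieldNum := int32(tag >> 3) // upper bits: field number
	wireType := int(tag & 0x7)  // low 3 bits: wire type (2 = length-delimited)

	strLen, i, err := readVarint(msg, i)
	if err != nil {
		panic(err)
	}
	fmt.Println(fieldNum, wireType, string(msg[i:i+int(strLen)])) // prints: 1 2 abc
}
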
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
index d5974d5ec..88344ce87 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
@@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1alpha1";
+// ApplyConfiguration defines the desired configuration values of an object.
+message ApplyConfiguration {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request ([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ optional string expression = 1;
+}
+
// AuditAnnotation describes how to produce an audit annotation for an API request.
message AuditAnnotation {
// key specifies the audit annotation key. The audit annotation keys of
@@ -79,6 +124,75 @@ message ExpressionWarning {
optional string warning = 3;
}
+// JSONPatch defines a JSON Patch.
+message JSONPatch {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request ([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ optional string expression = 1;
+}
+
message MatchCondition {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
// as well as providing an identifier for logging purposes. A good name should be descriptive of
@@ -202,6 +316,193 @@ message MatchResources {
optional string matchPolicy = 7;
}
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
+message MutatingAdmissionPolicy {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ optional MutatingAdmissionPolicySpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+message MutatingAdmissionPolicyBinding {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ optional MutatingAdmissionPolicyBindingSpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicyBinding.
+ repeated MutatingAdmissionPolicyBinding items = 2;
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingSpec {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ optional string policyName = 1;
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ optional ParamRef paramRef = 2;
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // +optional
+ optional MatchResources matchResources = 3;
+}
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+message MutatingAdmissionPolicyList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of MutatingAdmissionPolicy.
+ repeated MutatingAdmissionPolicy items = 2;
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+message MutatingAdmissionPolicySpec {
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ optional ParamKind paramKind = 1;
+
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ optional MatchResources matchConstraints = 2;
+
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ // +listType=atomic
+ // +optional
+ repeated Variable variables = 3;
+
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ //
+ // +listType=atomic
+ // +optional
+ repeated Mutation mutations = 4;
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ optional string failurePolicy = 5;
+
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ repeated MatchCondition matchConditions = 6;
+
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ optional string reinvocationPolicy = 7;
+}
+
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+message Mutation {
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ //
+ // +unionDiscriminator
+ optional string patchType = 2;
+
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ optional ApplyConfiguration applyConfiguration = 3;
+
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ optional JSONPatch jsonPatch = 4;
+}
+
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
message NamedRuleWithOperations {
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
index d4c2fbe80..eead376cc 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ValidatingAdmissionPolicyList{},
&ValidatingAdmissionPolicyBinding{},
&ValidatingAdmissionPolicyBindingList{},
+ &MutatingAdmissionPolicy{},
+ &MutatingAdmissionPolicyList{},
+ &MutatingAdmissionPolicyBinding{},
+ &MutatingAdmissionPolicyBindingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
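
Note (editorial aside, not part of the vendored change): registering the four Mutating* kinds above is what lets a runtime.Scheme resolve and decode them. A minimal sketch, assuming only the vendored k8s.io/api and k8s.io/apimachinery modules listed in go.mod; error handling is kept deliberately terse.

package main

import (
    "fmt"

    admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    // AddToScheme runs the addKnownTypes function shown in the hunk above.
    if err := admissionv1alpha1.AddToScheme(scheme); err != nil {
        panic(err)
    }

    // This lookup only succeeds because MutatingAdmissionPolicy is now registered.
    obj, err := scheme.New(admissionv1alpha1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicy"))
    if err != nil {
        panic(err)
    }
    fmt.Printf("%T\n", obj) // *v1alpha1.MutatingAdmissionPolicy
}
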
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
index 78d918bc7..ee50fbe2d 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -663,3 +663,346 @@ const (
Delete OperationType = v1.Delete
Connect OperationType = v1.Connect
)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into the admission chain.
+type MutatingAdmissionPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+type MutatingAdmissionPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of MutatingAdmissionPolicy.
+ Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+type MutatingAdmissionPolicySpec struct {
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
+
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
+
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ // +listType=atomic
+ // +optional
+ Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
+
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ //
+ // +listType=atomic
+ // +optional
+ Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
+
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
+
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
+}
+
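
Note (illustrative sketch, not part of the vendored change): the spec above wires together matchConstraints, mutations, failurePolicy and reinvocationPolicy. A hypothetical policy that defaults spec.serviceAccountName on pod creation, reusing the CEL example from the ApplyConfiguration docs below; the policy name is made up and only types from this package and apimachinery are used.

package main

import (
    admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    failurePolicy := admissionv1alpha1.Fail

    policy := admissionv1alpha1.MutatingAdmissionPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "default-service-account"}, // hypothetical name
        Spec: admissionv1alpha1.MutatingAdmissionPolicySpec{
            // matchConstraints: pod CREATE requests in the core/v1 group-version.
            MatchConstraints: &admissionv1alpha1.MatchResources{
                ResourceRules: []admissionv1alpha1.NamedRuleWithOperations{{
                    RuleWithOperations: admissionv1alpha1.RuleWithOperations{
                        Operations: []admissionv1alpha1.OperationType{admissionv1alpha1.Create},
                        Rule: admissionv1alpha1.Rule{
                            APIGroups:   []string{""},
                            APIVersions: []string{"v1"},
                            Resources:   []string{"pods"},
                        },
                    },
                }},
            },
            // A single ApplyConfiguration mutation built from a CEL expression.
            Mutations: []admissionv1alpha1.Mutation{{
                PatchType: admissionv1alpha1.PatchTypeApplyConfiguration,
                ApplyConfiguration: &admissionv1alpha1.ApplyConfiguration{
                    Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
                },
            }},
            FailurePolicy:      &failurePolicy,
            ReinvocationPolicy: admissionv1alpha1.NeverReinvocationPolicy,
        },
    }
    _ = policy
}
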
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+type Mutation struct {
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ //
+ // +unionDiscriminator
+ PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
+
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
+
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
+}
+
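
Note (illustrative sketch, not part of the vendored change): Mutation is a discriminated union, so patchType must agree with whichever member is populated. A minimal JSONPatch variant, reusing the jsonpatch.escapeKey example from the field docs below; the label key is made up.

package main

import admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"

func main() {
    addLabel := admissionv1alpha1.Mutation{
        // patchType selects the JSONPatch member, so only JSONPatch is set.
        PatchType: admissionv1alpha1.PatchTypeJSONPatch,
        JSONPatch: &admissionv1alpha1.JSONPatch{
            // CEL returning a list of JSONPatch operations; jsonpatch.escapeKey handles the '/' in the key.
            Expression: `[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), value: "test"}]`,
        },
    }
    _ = addLabel
}
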
+// PatchType specifies the type of patch operation for a mutation.
+// +enum
+type PatchType string
+
+const (
+ // PatchTypeApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
+ PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
+ // PatchTypeJSONPatch indicates that the object is mutated through JSON Patch.
+ PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// ApplyConfiguration defines the desired configuration values of an object.
+type ApplyConfiguration struct {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// JSONPatch defines a JSON Patch.
+type JSONPatch struct {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).
+ //
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
+ Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// ReinvocationPolicyType specifies the reinvocation policy used by an admission mutation.
+// +enum
+type ReinvocationPolicyType = v1.ReinvocationPolicyType
+
+const (
+ // NeverReinvocationPolicy indicates that the mutation must not be called more than once in a
+ // single admission evaluation.
+ NeverReinvocationPolicy ReinvocationPolicyType = v1.NeverReinvocationPolicy
+ // IfNeededReinvocationPolicy indicates that the mutation may be called at least one
+ // additional time as part of the admission evaluation if the object being admitted is
+ // modified by other admission plugins after the initial mutation call.
+ IfNeededReinvocationPolicy ReinvocationPolicyType = v1.IfNeededReinvocationPolicy
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+type MutatingAdmissionPolicyBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of MutatingAdmissionPolicyBinding.
+ Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingSpec struct {
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
+
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // +optional
+ MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
+}
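
Note (illustrative sketch, not part of the vendored change): a binding is what actually activates a policy; with no paramRef the policy is evaluated once per matching request, and matchResources can only narrow what the policy's own matchConstraints already matched. All names and labels here are hypothetical.

package main

import (
    admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    binding := admissionv1alpha1.MutatingAdmissionPolicyBinding{
        ObjectMeta: metav1.ObjectMeta{Name: "default-service-account-binding"}, // hypothetical name
        Spec: admissionv1alpha1.MutatingAdmissionPolicyBindingSpec{
            // Must match the metadata.name of an existing MutatingAdmissionPolicy,
            // otherwise the binding is invalid and ignored.
            PolicyName: "default-service-account",
            // paramRef is left nil: the hypothetical policy declares no paramKind,
            // so it is evaluated exactly once per matching request.
            // matchResources narrows the binding to namespaces carrying a label.
            MatchResources: &admissionv1alpha1.MatchResources{
                NamespaceSelector: &metav1.LabelSelector{
                    MatchLabels: map[string]string{"environment": "test"},
                },
            },
        },
    }
    _ = binding
}
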
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
index dcf46b324..32222a81b 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
@@ -27,6 +27,15 @@ package v1alpha1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ApplyConfiguration = map[string]string{
+ "": "ApplyConfiguration defines the desired configuration values of an object.",
+ "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (ApplyConfiguration) SwaggerDoc() map[string]string {
+ return map_ApplyConfiguration
+}
+
var map_AuditAnnotation = map[string]string{
"": "AuditAnnotation describes how to produce an audit annotation for an API request.",
"key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
@@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
return map_ExpressionWarning
}
+var map_JSONPatch = map[string]string{
+ "": "JSONPatch defines a JSON Patch.",
+ "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (JSONPatch) SwaggerDoc() map[string]string {
+ return map_JSONPatch
+}
+
var map_MatchResources = map[string]string{
"": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
"namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
@@ -60,6 +78,83 @@ func (MatchResources) SwaggerDoc() map[string]string {
return map_MatchResources
}
+var map_MutatingAdmissionPolicy = map[string]string{
+ "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicy
+}
+
+var map_MutatingAdmissionPolicyBinding = map[string]string{
+ "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBinding
+}
+
+var map_MutatingAdmissionPolicyBindingList = map[string]string{
+ "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of PolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingList
+}
+
+var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
+ "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
+ "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
+ "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
+ "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
+}
+
+func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyBindingSpec
+}
+
+var map_MutatingAdmissionPolicyList = map[string]string{
+ "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of ValidatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicyList
+}
+
+var map_MutatingAdmissionPolicySpec = map[string]string{
+ "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
+ "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
+ "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
+ "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
+ "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
+ "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
+ "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
+ "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
+}
+
+func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
+ return map_MutatingAdmissionPolicySpec
+}
+
+var map_Mutation = map[string]string{
+ "": "Mutation specifies the CEL expression which is used to apply the Mutation.",
+ "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
+ "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
+ "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
+}
+
+func (Mutation) SwaggerDoc() map[string]string {
+ return map_Mutation
+}
+
var map_NamedRuleWithOperations = map[string]string{
"": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
index 24cd0e4e9..97c159c74 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
@@ -26,6 +26,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
+func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ApplyConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
*out = *in
@@ -58,6 +74,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
+func (in *JSONPatch) DeepCopy() *JSONPatch {
+ if in == nil {
+ return nil
+ }
+ out := new(JSONPatch)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
@@ -119,6 +151,226 @@ func (in *MatchResources) DeepCopy() *MatchResources {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
+func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
+func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MutatingAdmissionPolicyBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
+ *out = *in
+ if in.ParamRef != nil {
+ in, out := &in.ParamRef, &out.ParamRef
+ *out = new(ParamRef)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MatchResources != nil {
+ in, out := &in.MatchResources, &out.MatchResources
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyBindingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MutatingAdmissionPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
+func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
+ *out = *in
+ if in.ParamKind != nil {
+ in, out := &in.ParamKind, &out.ParamKind
+ *out = new(ParamKind)
+ **out = **in
+ }
+ if in.MatchConstraints != nil {
+ in, out := &in.MatchConstraints, &out.MatchConstraints
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Variables != nil {
+ in, out := &in.Variables, &out.Variables
+ *out = make([]Variable, len(*in))
+ copy(*out, *in)
+ }
+ if in.Mutations != nil {
+ in, out := &in.Mutations, &out.Mutations
+ *out = make([]Mutation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailurePolicy != nil {
+ in, out := &in.FailurePolicy, &out.FailurePolicy
+ *out = new(FailurePolicyType)
+ **out = **in
+ }
+ if in.MatchConditions != nil {
+ in, out := &in.MatchConditions, &out.MatchConditions
+ *out = make([]MatchCondition, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
+func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MutatingAdmissionPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mutation) DeepCopyInto(out *Mutation) {
+ *out = *in
+ if in.ApplyConfiguration != nil {
+ in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
+ *out = new(ApplyConfiguration)
+ **out = **in
+ }
+ if in.JSONPatch != nil {
+ in, out := &in.JSONPatch, &out.JSONPatch
+ *out = new(JSONPatch)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
+func (in *Mutation) DeepCopy() *Mutation {
+ if in == nil {
+ return nil
+ }
+ out := new(Mutation)
+ in.DeepCopyInto(out)
+ return out
+}
+
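
Note (illustrative sketch, not part of the vendored change): the generated DeepCopyInto re-allocates the ApplyConfiguration/JSONPatch pointers, so mutating a copy leaves the original untouched. A quick sanity check:

package main

import (
    "fmt"

    admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
    orig := &admissionv1alpha1.Mutation{
        PatchType:          admissionv1alpha1.PatchTypeApplyConfiguration,
        ApplyConfiguration: &admissionv1alpha1.ApplyConfiguration{Expression: `Object{}`},
    }

    cp := orig.DeepCopy()
    cp.ApplyConfiguration.Expression = "changed"

    // The pointer was re-allocated by DeepCopyInto, so the original is unchanged.
    fmt.Println(orig.ApplyConfiguration.Expression) // Object{}
    fmt.Println(cp.ApplyConfiguration.Expression)   // changed
}
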
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
*out = *in
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 000000000..91c813d5f
--- /dev/null
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,166 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
+ return 1, 26
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
+ return 1, 29
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
+ return 1, 26
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
+ return 1, 29
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 26
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 29
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 26
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 29
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
+ return 1, 32
+}
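The three generated methods per type above follow the prerelease-lifecycle-gen convention described in their own comments: unless an explicit "deprecated"/"removed" tag overrides it, deprecation defaults to three minor releases after introduction and removal to three more. A minimal standalone sketch of that arithmetic (illustrative only, not part of the vendored code), matching the 1.32 -> 1.35 -> 1.38 values emitted for MutatingAdmissionPolicyList:

// Standalone sketch of the default prerelease-lifecycle-gen schedule.
package main

import "fmt"

// introducedAt mirrors the pair returned by a generated APILifecycleIntroduced.
type introducedAt struct {
	major, minor int
}

// deprecated applies the default "+3 minor" offset used when no explicit
// deprecated tag is present.
func (v introducedAt) deprecated() (int, int) { return v.major, v.minor + 3 }

// removed applies a further "+3 minor" on top of the deprecation release.
func (v introducedAt) removed() (int, int) {
	major, minor := v.deprecated()
	return major, minor + 3
}

func main() {
	mutatingPolicyList := introducedAt{major: 1, minor: 32}
	dMajor, dMinor := mutatingPolicyList.deprecated()
	rMajor, rMinor := mutatingPolicyList.removed()
	fmt.Printf("deprecated: %d.%d, removed: %d.%d\n", dMajor, dMinor, rMajor, rMinor)
}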
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index d864f2eeb..388e638f4 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -737,8 +737,7 @@ message StatefulSetSpec {
// volume claims are created as needed and retained until manually deleted. This
// policy allows the lifecycle to be altered, for example by deleting persistent
// volume claims when their stateful set is deleted, or when their pod is scaled
- // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
- // which is beta.
+ // down.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index e942cd526..a68690b44 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -142,7 +142,7 @@ const (
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will not be deleted.
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
- // RetentionPersistentVolumeClaimRetentionPolicyType specifies that
+ // DeletePersistentVolumeClaimRetentionPolicyType specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will be deleted in the scenario specified in
// StatefulSetPersistentVolumeClaimRetentionPolicy.
@@ -255,8 +255,7 @@ type StatefulSetSpec struct {
// volume claims are created as needed and retained until manually deleted. This
// policy allows the lifecycle to be altered, for example by deleting persistent
// volume claims when their stateful set is deleted, or when their pod is scaled
- // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
- // which is beta.
+ // down.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index f3e221a0e..341ecdadb 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -354,7 +354,7 @@ var map_StatefulSetSpec = map[string]string{
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.",
+ "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down.",
"ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
}
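With the StatefulSetAutoDeletePVC gate graduated (which is why the caveat is dropped in the hunks above), the retention policy can be set on an apps/v1 StatefulSetSpec without qualification. A hedged sketch using the vendored apps/v1 types; the particular WhenDeleted/WhenScaled mix is illustrative:

// Sketch: configuring PVC retention on a StatefulSetSpec.
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	spec := appsv1.StatefulSetSpec{
		PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			// Delete PVCs when the StatefulSet is deleted, keep them on scale-down.
			WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
	}
	fmt.Println(spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted, spec.PersistentVolumeClaimRetentionPolicy.WhenScaled)
}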
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
index 4b0fa366c..46d7bfdf9 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -486,8 +486,7 @@ message StatefulSetSpec {
optional int32 minReadySeconds = 9;
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates. This requires the
- // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
+ // the StatefulSet VolumeClaimTemplates.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
index 07bfa88c5..bc4851957 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -181,11 +181,11 @@ const (
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will not be deleted.
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
- // RetentionPersistentVolumeClaimRetentionPolicyType specifies that
+ // DeletePersistentVolumeClaimRetentionPolicyType specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will be deleted in the scenario specified in
// StatefulSetPersistentVolumeClaimRetentionPolicy.
- RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
+ DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
)
// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
@@ -290,8 +290,7 @@ type StatefulSetSpec struct {
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates. This requires the
- // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
+ // the StatefulSet VolumeClaimTemplates.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
index 9e7fb1adc..1381d75dc 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -258,7 +258,7 @@ var map_StatefulSetSpec = map[string]string{
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
"minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.",
+ "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.",
"ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
}
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
index d3db8956e..c08a4c78b 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -778,8 +778,7 @@ message StatefulSetSpec {
optional int32 minReadySeconds = 9;
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates. This requires the
- // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
+ // the StatefulSet VolumeClaimTemplates.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
index f93a5bea7..c2624a941 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -191,11 +191,11 @@ const (
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will not be deleted.
RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
- // RetentionPersistentVolumeClaimRetentionPolicyType specifies that
+ // DeletePersistentVolumeClaimRetentionPolicyType specifies that
// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
// will be deleted in the scenario specified in
// StatefulSetPersistentVolumeClaimRetentionPolicy.
- RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
+ DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
)
// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
@@ -300,8 +300,7 @@ type StatefulSetSpec struct {
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates. This requires the
- // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
+ // the StatefulSet VolumeClaimTemplates.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 0b8fe34af..beec4b755 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -382,7 +382,7 @@ var map_StatefulSetSpec = map[string]string{
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.",
+ "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.",
"ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
}
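The v1beta1 and v1beta2 hunks above also rename the previously misnamed RetentionPersistentVolumeClaimRetentionPolicyType identifier to DeletePersistentVolumeClaimRetentionPolicyType; the serialized value stays "Delete", but Go callers that referenced the old name need to update. A short sketch against the renamed v1beta2 constant (illustrative, not part of this diff):

// Sketch: the renamed retention-policy constants in apps/v1beta2.
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// The old RetentionPersistentVolumeClaimRetentionPolicyType identifier no
	// longer exists; the renamed constant carries the same "Delete" value.
	fmt.Println(appsv1beta2.DeletePersistentVolumeClaimRetentionPolicyType) // Delete
	fmt.Println(appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType) // Retain
}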
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
index 0a961312f..68c35b6b2 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
@@ -241,8 +241,6 @@ message HorizontalPodAutoscalerStatus {
message MetricSpec {
// type is the type of metric source. It should be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
@@ -269,7 +267,6 @@ message MetricSpec {
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
optional ContainerResourceMetricSource containerResource = 7;
@@ -286,8 +283,6 @@ message MetricSpec {
message MetricStatus {
// type is the type of metric source. It will be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
index b31425b3b..85c609e5c 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types.go
@@ -193,8 +193,6 @@ const (
type MetricSpec struct {
// type is the type of metric source. It should be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
@@ -221,7 +219,6 @@ type MetricSpec struct {
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"`
@@ -355,8 +352,6 @@ type ExternalMetricSource struct {
type MetricStatus struct {
// type is the type of metric source. It will be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
index 37c2b36a5..ba43d06c1 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
@@ -147,11 +147,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
var map_MetricSpec = map[string]string{
"": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
- "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
+ "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
@@ -161,7 +161,7 @@ func (MetricSpec) SwaggerDoc() map[string]string {
var map_MetricStatus = map[string]string{
"": "MetricStatus describes the last-read state of a single metric.",
- "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto
index 8f2ee5803..4e6dc0592 100644
--- a/vendor/k8s.io/api/autoscaling/v2/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto
@@ -301,8 +301,6 @@ message MetricIdentifier {
message MetricSpec {
// type is the type of metric source. It should be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
@@ -329,7 +327,6 @@ message MetricSpec {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
optional ContainerResourceMetricSource containerResource = 7;
@@ -346,8 +343,6 @@ message MetricSpec {
message MetricStatus {
// type is the type of metric source. It will be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go
index 69a7b2701..99e8db09d 100644
--- a/vendor/k8s.io/api/autoscaling/v2/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2/types.go
@@ -102,8 +102,6 @@ type CrossVersionObjectReference struct {
type MetricSpec struct {
// type is the type of metric source. It should be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
@@ -130,7 +128,6 @@ type MetricSpec struct {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"`
@@ -453,8 +450,6 @@ type HorizontalPodAutoscalerCondition struct {
type MetricStatus struct {
// type is the type of metric source. It will be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
index 1941b1ef5..649cd04a0 100644
--- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
@@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string {
var map_MetricSpec = map[string]string{
"": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
- "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
+ "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
@@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string {
var map_MetricStatus = map[string]string{
"": "MetricStatus describes the last-read state of a single metric.",
- "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
index 232a59815..4b71732ab 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
@@ -260,8 +260,6 @@ message HorizontalPodAutoscalerStatus {
message MetricSpec {
// type is the type of metric source. It should be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
@@ -288,7 +286,6 @@ message MetricSpec {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
optional ContainerResourceMetricSource containerResource = 7;
@@ -305,8 +302,6 @@ message MetricSpec {
message MetricStatus {
// type is the type of metric source. It will be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go
index 193cc4354..c3abdd9bd 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go
@@ -96,8 +96,6 @@ const (
type MetricSpec struct {
// type is the type of metric source. It should be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
@@ -121,7 +119,6 @@ type MetricSpec struct {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"`
// external refers to a global metric that is not associated
@@ -311,8 +308,6 @@ type HorizontalPodAutoscalerCondition struct {
type MetricStatus struct {
// type is the type of metric source. It will be one of "ContainerResource",
// "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
index d656ee416..c7c72bf35 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
@@ -148,11 +148,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string {
var map_MetricSpec = map[string]string{
"": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
- "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
+ "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
@@ -162,7 +162,7 @@ func (MetricSpec) SwaggerDoc() map[string]string {
var map_MetricStatus = map[string]string{
"": "MetricStatus describes the last-read state of a single metric.",
- "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
index c88fc1fe2..941d9752a 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
@@ -297,8 +297,6 @@ message MetricIdentifier {
message MetricSpec {
// type is the type of metric source. It should be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
@@ -325,7 +323,6 @@ message MetricSpec {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
optional ContainerResourceMetricSource containerResource = 7;
@@ -342,8 +339,6 @@ message MetricSpec {
message MetricStatus {
// type is the type of metric source. It will be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
optional string type = 1;
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
index 2fee0b8a0..bc9677b14 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
@@ -104,8 +104,6 @@ type CrossVersionObjectReference struct {
type MetricSpec struct {
// type is the type of metric source. It should be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each mapping to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
@@ -132,7 +130,6 @@ type MetricSpec struct {
// each pod of the current scale target (e.g. CPU or memory). Such metrics are
// built in to Kubernetes, and have special scaling options on top of those
// available to normal per-pod metrics using the "pods" source.
- // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
// +optional
ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"`
@@ -449,8 +446,6 @@ type HorizontalPodAutoscalerCondition struct {
type MetricStatus struct {
// type is the type of metric source. It will be one of "ContainerResource", "External",
// "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
- // Note: "ContainerResource" type is available on when the feature-gate
- // HPAContainerMetrics is enabled
Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"`
// object refers to a metric describing a single kubernetes object
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
index 4af7d0ec0..5d4bb86b8 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
@@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string {
var map_MetricSpec = map[string]string{
"": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
- "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
- "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
+ "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
"external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
}
@@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string {
var map_MetricStatus = map[string]string{
"": "MetricStatus describes the last-read state of a single metric.",
- "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
+ "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.",
"object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
"pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
"resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index f5a9385f5..361ebdca1 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -350,8 +350,8 @@ message JobSpec {
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
//
- // This field is alpha-level. The job controller accepts setting the field
- // when the feature gate JobManagedBy is enabled (disabled by default).
+ // This field is beta-level. The job controller accepts setting the field
+ // when the feature gate JobManagedBy is enabled (enabled by default).
// +optional
optional string managedBy = 15;
}
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index b42ec231e..8e9a761b9 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -29,7 +29,6 @@ const (
// CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job.
// It records the original/expected scheduled timestamp for the running job, represented in RFC3339.
- // The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled.
CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp"
JobCompletionIndexAnnotation = labelPrefix + "job-completion-index"
@@ -480,8 +479,8 @@ type JobSpec struct {
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
//
- // This field is alpha-level. The job controller accepts setting the field
- // when the feature gate JobManagedBy is enabled (disabled by default).
+ // This field is beta-level. The job controller accepts setting the field
+ // when the feature gate JobManagedBy is enabled (enabled by default).
// +optional
ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"`
}
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index d50488788..893f3371f 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
- "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).",
+ "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
}
func (JobSpec) SwaggerDoc() map[string]string {
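The managedBy comments above move from alpha to beta wording, with the JobManagedBy gate on by default. A hedged sketch of delegating a Job to an external controller through this field; the controller name below is hypothetical:

// Sketch: opting a Job out of the built-in controller via spec.managedBy.
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

func main() {
	job := batchv1.Job{
		Spec: batchv1.JobSpec{
			// Any value other than the reserved "kubernetes.io/job-controller"
			// tells the built-in Job controller to skip reconciling this Job.
			ManagedBy: ptr.To("example.com/custom-job-controller"),
		},
	}
	fmt.Println(*job.Spec.ManagedBy)
}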
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go
index 1a9fda011..beef02599 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/types.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go
@@ -23,6 +23,7 @@ import (
// +genclient
// +genclient:nonNamespaced
// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:deprecated=1.34
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
@@ -90,6 +91,7 @@ type ClusterTrustBundleSpec struct {
}
// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:deprecated=1.34
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterTrustBundleList is a collection of ClusterTrustBundle objects
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
index dfafa656c..3121a87d0 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
@@ -30,13 +30,13 @@ func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) {
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) {
- return 1, 29
+ return 1, 34
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) {
- return 1, 32
+ return 1, 37
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
@@ -48,11 +48,11 @@ func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) {
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
- return 1, 29
+ return 1, 34
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
- return 1, 32
+ return 1, 37
}
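The new deprecated=1.34 tags push the generated ClusterTrustBundle lifecycle out to 1.34/1.37, as the regenerated file above shows. A small sketch reading those values back through the regenerated methods (illustrative only):

// Sketch: querying the regenerated lifecycle metadata.
package main

import (
	"fmt"

	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
)

func main() {
	var ctb certificatesv1alpha1.ClusterTrustBundle
	depMajor, depMinor := ctb.APILifecycleDeprecated()
	remMajor, remMinor := ctb.APILifecycleRemoved()
	fmt.Printf("deprecated in %d.%d, removed in %d.%d\n", depMajor, depMinor, remMajor, remMinor) // 1.34, 1.37
}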
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/doc.go b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
similarity index 92%
rename from vendor/k8s.io/api/coordination/v1alpha1/doc.go
rename to vendor/k8s.io/api/coordination/v1alpha2/doc.go
index 33a0b0ea9..5e6d65530 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/doc.go
@@ -21,4 +21,4 @@ limitations under the License.
// +groupName=coordination.k8s.io
-package v1alpha1 // import "k8s.io/api/coordination/v1alpha1"
+package v1alpha2 // import "k8s.io/api/coordination/v1alpha2"
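The coordination group's LeaseCandidate API moves from v1alpha1 to v1alpha2 in the renames that follow; the Go type names are unchanged, so consumers only swap the import path. A minimal sketch against the new vendored package (illustrative):

// Sketch: referencing LeaseCandidate from the renamed v1alpha2 package.
package main

import (
	"fmt"

	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
)

func main() {
	candidate := coordinationv1alpha2.LeaseCandidate{}
	fmt.Printf("%T\n", candidate)
}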
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
similarity index 82%
rename from vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go
rename to vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
index 9e072e62d..85ceea1f2 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
@@ -15,9 +15,9 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/api/coordination/v1alpha1/generated.proto
+// source: k8s.io/api/coordination/v1alpha2/generated.proto
-package v1alpha1
+package v1alpha2
import (
fmt "fmt"
@@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
func (*LeaseCandidate) ProtoMessage() {}
func (*LeaseCandidate) Descriptor() ([]byte, []int) {
- return fileDescriptor_cb9e87df9da593c2, []int{0}
+ return fileDescriptor_c1ec5c989d262916, []int{0}
}
func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -77,7 +77,7 @@ var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
func (*LeaseCandidateList) ProtoMessage() {}
func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cb9e87df9da593c2, []int{1}
+ return fileDescriptor_c1ec5c989d262916, []int{1}
}
func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -105,7 +105,7 @@ var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
func (*LeaseCandidateSpec) ProtoMessage() {}
func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_cb9e87df9da593c2, []int{2}
+ return fileDescriptor_c1ec5c989d262916, []int{2}
}
func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -131,53 +131,52 @@ func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
func init() {
- proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate")
- proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList")
- proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec")
+ proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidate")
+ proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateList")
+ proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateSpec")
}
func init() {
- proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2)
+ proto.RegisterFile("k8s.io/api/coordination/v1alpha2/generated.proto", fileDescriptor_c1ec5c989d262916)
}
-var fileDescriptor_cb9e87df9da593c2 = []byte{
- // 570 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c,
- 0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65,
- 0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25,
- 0x50, 0x17, 0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00,
- 0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94,
- 0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b,
- 0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6,
- 0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f,
- 0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38,
- 0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22,
- 0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80,
- 0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd,
- 0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f,
- 0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5,
- 0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75,
- 0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71,
- 0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f,
- 0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf,
- 0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c,
- 0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0,
- 0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7,
- 0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f,
- 0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61,
- 0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c,
- 0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5,
- 0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e,
- 0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b,
- 0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8,
- 0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5,
- 0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7,
- 0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda,
- 0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5,
- 0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb,
- 0x1b, 0x1b, 0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda,
- 0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00,
+var fileDescriptor_c1ec5c989d262916 = []byte{
+ // 555 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0xd3, 0x4e,
+ 0x18, 0xc7, 0x9b, 0xdd, 0xf6, 0x47, 0x3b, 0xbf, 0xad, 0xd4, 0x01, 0x21, 0xf4, 0x90, 0x96, 0x9e,
+ 0x44, 0x70, 0x66, 0x77, 0x5d, 0x44, 0xf0, 0x96, 0xf5, 0x0f, 0x42, 0x57, 0x25, 0xab, 0x0b, 0xca,
+ 0x1e, 0x9c, 0x26, 0x8f, 0xe9, 0xd8, 0x26, 0x13, 0x92, 0xe9, 0x4a, 0x6f, 0xbe, 0x04, 0x5f, 0x56,
+ 0xf5, 0xb4, 0xc7, 0x3d, 0x15, 0x1b, 0xc1, 0x17, 0xe1, 0x49, 0x66, 0x9a, 0xf4, 0xaf, 0xa5, 0xc5,
+ 0x5b, 0xe7, 0x99, 0xe7, 0xf3, 0x99, 0xf9, 0x3e, 0x69, 0x82, 0x0e, 0x7b, 0x8f, 0x12, 0xc2, 0x05,
+ 0x65, 0x11, 0xa7, 0xae, 0x10, 0xb1, 0xc7, 0x43, 0x26, 0xb9, 0x08, 0xe9, 0xd5, 0x11, 0xeb, 0x47,
+ 0x5d, 0x76, 0x4c, 0x7d, 0x08, 0x21, 0x66, 0x12, 0x3c, 0x12, 0xc5, 0x42, 0x0a, 0xdc, 0x9c, 0x12,
+ 0x84, 0x45, 0x9c, 0x2c, 0x12, 0x24, 0x27, 0xea, 0xf7, 0x7d, 0x2e, 0xbb, 0x83, 0x0e, 0x71, 0x45,
+ 0x40, 0x7d, 0xe1, 0x0b, 0xaa, 0xc1, 0xce, 0xe0, 0xa3, 0x5e, 0xe9, 0x85, 0xfe, 0x35, 0x15, 0xd6,
+ 0xef, 0x6d, 0xbe, 0xc2, 0xea, 0xe1, 0xf5, 0x93, 0x79, 0x6f, 0xc0, 0xdc, 0x2e, 0x0f, 0x21, 0x1e,
+ 0xd2, 0xa8, 0xe7, 0xab, 0x42, 0x42, 0x03, 0x90, 0xec, 0x6f, 0x14, 0xdd, 0x44, 0xc5, 0x83, 0x50,
+ 0xf2, 0x00, 0xd6, 0x80, 0x87, 0xdb, 0x80, 0xc4, 0xed, 0x42, 0xc0, 0x56, 0xb9, 0xd6, 0x77, 0x03,
+ 0xdd, 0x6a, 0x03, 0x4b, 0xe0, 0x94, 0x85, 0x1e, 0xf7, 0x98, 0x04, 0xfc, 0x01, 0x95, 0xd5, 0xb5,
+ 0x3c, 0x26, 0x99, 0x69, 0x34, 0x8d, 0xbb, 0xff, 0x1f, 0x1f, 0x92, 0xf9, 0x04, 0x67, 0x76, 0x12,
+ 0xf5, 0x7c, 0x55, 0x48, 0x88, 0xea, 0x26, 0x57, 0x47, 0xe4, 0x55, 0xe7, 0x13, 0xb8, 0xf2, 0x0c,
+ 0x24, 0xb3, 0xf1, 0x68, 0xdc, 0x28, 0xa4, 0xe3, 0x06, 0x9a, 0xd7, 0x9c, 0x99, 0x15, 0x5f, 0xa0,
+ 0x62, 0x12, 0x81, 0x6b, 0xee, 0x69, 0xfb, 0x09, 0xd9, 0xf6, 0x7c, 0xc8, 0xf2, 0x0d, 0xcf, 0x23,
+ 0x70, 0xed, 0x83, 0xec, 0x84, 0xa2, 0x5a, 0x39, 0xda, 0xd7, 0xfa, 0x66, 0x20, 0xbc, 0xdc, 0xda,
+ 0xe6, 0x89, 0xc4, 0x97, 0x6b, 0x81, 0xc8, 0x6e, 0x81, 0x14, 0xad, 0xe3, 0xd4, 0xb2, 0xc3, 0xca,
+ 0x79, 0x65, 0x21, 0xcc, 0x5b, 0x54, 0xe2, 0x12, 0x82, 0xc4, 0xdc, 0x6b, 0xee, 0xaf, 0xcc, 0x6a,
+ 0xa7, 0x34, 0x76, 0x35, 0x93, 0x97, 0x5e, 0x28, 0x8d, 0x33, 0xb5, 0xb5, 0x7e, 0xed, 0xaf, 0x66,
+ 0x51, 0x41, 0x31, 0x45, 0x95, 0xbe, 0xaa, 0xbe, 0x64, 0x01, 0xe8, 0x30, 0x15, 0xfb, 0x76, 0xc6,
+ 0x57, 0xda, 0xf9, 0x86, 0x33, 0xef, 0xc1, 0xef, 0x50, 0x39, 0xe2, 0xa1, 0xff, 0x86, 0x07, 0x90,
+ 0xcd, 0x9b, 0xee, 0x16, 0xfe, 0x8c, 0xbb, 0xb1, 0x50, 0x98, 0x7d, 0xa0, 0x92, 0xbf, 0xce, 0x24,
+ 0xce, 0x4c, 0x87, 0x2f, 0x51, 0x25, 0x86, 0x10, 0x3e, 0x6b, 0xf7, 0xfe, 0xbf, 0xb9, 0xab, 0xea,
+ 0xe2, 0x4e, 0x6e, 0x71, 0xe6, 0x42, 0xfc, 0x18, 0x55, 0x3b, 0x3c, 0x64, 0xf1, 0xf0, 0x02, 0xe2,
+ 0x84, 0x8b, 0xd0, 0x2c, 0xea, 0xb4, 0x77, 0xb2, 0xb4, 0x55, 0x7b, 0x71, 0xd3, 0x59, 0xee, 0xc5,
+ 0x4f, 0x50, 0x0d, 0x82, 0x41, 0x5f, 0x0f, 0x3e, 0xe7, 0x4b, 0x9a, 0x37, 0x33, 0xbe, 0xf6, 0x74,
+ 0x65, 0xdf, 0x59, 0x23, 0xb0, 0x8b, 0xca, 0x89, 0x54, 0x6f, 0x8b, 0x3f, 0x34, 0xff, 0xd3, 0xf4,
+ 0xf3, 0xfc, 0x8f, 0x70, 0x9e, 0xd5, 0x7f, 0x8f, 0x1b, 0x0f, 0x36, 0x7f, 0x0d, 0xc8, 0x69, 0xbe,
+ 0x06, 0x4f, 0x3f, 0x9d, 0x1c, 0x73, 0x66, 0x62, 0xfb, 0xd9, 0x68, 0x62, 0x15, 0xae, 0x27, 0x56,
+ 0xe1, 0x66, 0x62, 0x15, 0xbe, 0xa4, 0x96, 0x31, 0x4a, 0x2d, 0xe3, 0x3a, 0xb5, 0x8c, 0x9b, 0xd4,
+ 0x32, 0x7e, 0xa4, 0x96, 0xf1, 0xf5, 0xa7, 0x55, 0x78, 0xdf, 0xdc, 0xf6, 0xd5, 0xfb, 0x13, 0x00,
+ 0x00, 0xff, 0xff, 0x7f, 0x15, 0x63, 0xd0, 0x18, 0x05, 0x00, 0x00,
}
func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) {
@@ -290,15 +289,11 @@ func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- if len(m.PreferredStrategies) > 0 {
- for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PreferredStrategies[iNdEx])
- copy(dAtA[i:], m.PreferredStrategies[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx])))
- i--
- dAtA[i] = 0x32
- }
- }
+ i -= len(m.Strategy)
+ copy(dAtA[i:], m.Strategy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy)))
+ i--
+ dAtA[i] = 0x32
i -= len(m.EmulationVersion)
copy(dAtA[i:], m.EmulationVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion)))
@@ -402,12 +397,8 @@ func (m *LeaseCandidateSpec) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.EmulationVersion)
n += 1 + l + sovGenerated(uint64(l))
- if len(m.PreferredStrategies) > 0 {
- for _, s := range m.PreferredStrategies {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
+ l = len(m.Strategy)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -454,7 +445,7 @@ func (this *LeaseCandidateSpec) String() string {
`RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`,
`BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`,
`EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`,
- `PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`,
+ `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`,
`}`,
}, "")
return s
@@ -899,7 +890,7 @@ func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -927,7 +918,7 @@ func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]))
+ m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
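One detail in the generated.pb.go hunk above that is easy to miss: the wire tag byte 0x32 survives the rename, because both the old preferredStrategies and the new strategy occupy proto field 6 with the length-delimited wire type. A minimal standalone sketch of that arithmetic (not part of the vendored code):

package main

import "fmt"

func main() {
	const fieldNumber = 6
	const wireTypeLengthDelimited = 2
	// protobuf key byte layout: (field_number << 3) | wire_type
	tag := fieldNumber<<3 | wireTypeLengthDelimited
	fmt.Printf("0x%x\n", tag) // prints 0x32, matching dAtA[i] = 0x32 in the hunk above
}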
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
similarity index 79%
rename from vendor/k8s.io/api/coordination/v1alpha1/generated.proto
rename to vendor/k8s.io/api/coordination/v1alpha2/generated.proto
index 57895ad56..7e56cd7f9 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/coordination/v1alpha2/generated.proto
@@ -19,7 +19,7 @@ limitations under the License.
syntax = "proto2";
-package k8s.io.api.coordination.v1alpha1;
+package k8s.io.api.coordination.v1alpha2;
import "k8s.io/api/coordination/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
@@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
-option go_package = "k8s.io/api/coordination/v1alpha1";
+option go_package = "k8s.io/api/coordination/v1alpha2";
// LeaseCandidate defines a candidate for a Lease object.
// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
@@ -78,8 +78,8 @@ message LeaseCandidateSpec {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3;
// BinaryVersion is the binary version. It must be in a semver format without leading `v`.
- // This field is required when strategy is "OldestEmulationVersion"
- // +optional
+ // This field is required.
+ // +required
optional string binaryVersion = 4;
// EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
@@ -88,18 +88,13 @@ message LeaseCandidateSpec {
// +optional
optional string emulationVersion = 5;
- // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election.
- // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated
- // leader election to make a decision about the final election strategy. This follows as
- // - If all clients have strategy X as the first element in this list, strategy X will be used.
- // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y
- // will be used.
- // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader
- // election will not operate the Lease until resolved.
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
// +featureGate=CoordinatedLeaderElection
- // +listType=atomic
// +required
- repeated string preferredStrategies = 6;
+ optional string strategy = 6;
}
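The rewritten strategy comment above states a concrete resolution rule: when candidates for the same Lease disagree, the strategy of the candidate with the latest BinaryVersion wins. A hypothetical sketch of that rule, for illustration only (the candidate type, pickStrategy helper, and use of golang.org/x/mod/semver are assumptions, not the upstream election implementation; the residual-conflict case the comment calls a user error is not handled here):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

type candidate struct {
	binaryVersion string // semver without leading "v", per the field docs above
	strategy      string
}

// pickStrategy returns the strategy of the candidate with the latest BinaryVersion.
func pickStrategy(cands []candidate) string {
	if len(cands) == 0 {
		return ""
	}
	best := cands[0]
	for _, c := range cands[1:] {
		if semver.Compare("v"+c.binaryVersion, "v"+best.binaryVersion) > 0 {
			best = c
		}
	}
	return best.strategy
}

func main() {
	fmt.Println(pickStrategy([]candidate{
		{binaryVersion: "1.31.0", strategy: "OldestEmulationVersion"},
		{binaryVersion: "1.32.0", strategy: "OldestEmulationVersion"},
	}))
}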
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha2/register.go
similarity index 95%
rename from vendor/k8s.io/api/coordination/v1alpha1/register.go
rename to vendor/k8s.io/api/coordination/v1alpha2/register.go
index 6e57905a1..86bb8e0f2 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/register.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,7 +26,7 @@ import (
const GroupName = "coordination.k8s.io"
// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha2/types.go
similarity index 75%
rename from vendor/k8s.io/api/coordination/v1alpha1/types.go
rename to vendor/k8s.io/api/coordination/v1alpha2/types.go
index 14066600c..2f53b097a 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/types.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/types.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1alpha2
import (
v1 "k8s.io/api/coordination/v1"
@@ -23,7 +23,7 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
// LeaseCandidate defines a candidate for a Lease object.
// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
@@ -61,31 +61,26 @@ type LeaseCandidateSpec struct {
// +optional
RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"`
// BinaryVersion is the binary version. It must be in a semver format without leading `v`.
- // This field is required when strategy is "OldestEmulationVersion"
- // +optional
- BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"`
+ // This field is required.
+ // +required
+ BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"`
// EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
// EmulationVersion must be less than or equal to BinaryVersion.
// This field is required when strategy is "OldestEmulationVersion"
// +optional
EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"`
- // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election.
- // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated
- // leader election to make a decision about the final election strategy. This follows as
- // - If all clients have strategy X as the first element in this list, strategy X will be used.
- // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y
- // will be used.
- // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader
- // election will not operate the Lease until resolved.
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
// (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
// +featureGate=CoordinatedLeaderElection
- // +listType=atomic
// +required
- PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"`
+ Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
// LeaseCandidateList is a list of Lease objects.
type LeaseCandidateList struct {
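For consumers of this vendored API, the types.go hunk above means the v1alpha1 PreferredStrategies slice becomes a single required Strategy value in v1alpha2, and BinaryVersion becomes required. A hedged migration sketch (field values are illustrative only; only fields shown in the hunk and swagger docs are used):

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
)

func main() {
	spec := coordinationv1alpha2.LeaseCandidateSpec{
		LeaseName:        "example-lease", // hypothetical lease name
		BinaryVersion:    "1.32.0",        // now required, semver without leading "v"
		EmulationVersion: "1.32.0",        // still optional unless the strategy needs it
		// v1alpha1 equivalent: PreferredStrategies: []coordinationv1.CoordinatedLeaseStrategy{"OldestEmulationVersion"}
		Strategy: coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion"),
	}
	fmt.Printf("%+v\n", spec)
}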
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
similarity index 51%
rename from vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go
rename to vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
index 0e52809c8..39534e6ad 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1alpha2
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
@@ -48,13 +48,13 @@ func (LeaseCandidateList) SwaggerDoc() map[string]string {
}
var map_LeaseCandidateSpec = map[string]string{
- "": "LeaseCandidateSpec is a specification of a Lease.",
- "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.",
- "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.",
- "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
- "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"",
- "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
- "preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
+ "": "LeaseCandidateSpec is a specification of a Lease.",
+ "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.",
+ "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.",
+ "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.",
+ "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.",
+ "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"",
+ "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.",
}
func (LeaseCandidateSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go
similarity index 93%
rename from vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go
rename to vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go
index 9cf15d21d..a20284797 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go
@@ -19,10 +19,9 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
import (
- v1 "k8s.io/api/coordination/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -97,11 +96,6 @@ func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) {
in, out := &in.RenewTime, &out.RenewTime
*out = (*in).DeepCopy()
}
- if in.PreferredStrategies != nil {
- in, out := &in.PreferredStrategies, &out.PreferredStrategies
- *out = make([]v1.CoordinatedLeaseStrategy, len(*in))
- copy(*out, *in)
- }
return
}
diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go
similarity index 96%
rename from vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go
rename to vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go
index f42bef65c..a99b9ab5b 100644
--- a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go
@@ -19,40 +19,40 @@ limitations under the License.
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) {
- return 1, 31
+ return 1, 32
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) {
- return 1, 34
+ return 1, 35
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) {
- return 1, 37
+ return 1, 38
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) {
- return 1, 31
+ return 1, 32
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) {
- return 1, 34
+ return 1, 35
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) {
- return 1, 37
+ return 1, 38
}
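The regenerated lifecycle values above follow the rule stated in the generator comments: deprecation is introduction plus three minors, and removal is deprecation plus three more. A small illustrative check using only the methods shown in this hunk:

package main

import (
	"fmt"

	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
)

func main() {
	var lc coordinationv1alpha2.LeaseCandidate
	imaj, imin := lc.APILifecycleIntroduced() // 1, 32
	dmaj, dmin := lc.APILifecycleDeprecated() // 1, 35 == introduced + 3 minors
	rmaj, rmin := lc.APILifecycleRemoved()    // 1, 38 == deprecated + 3 minors
	fmt.Println(imaj, imin, dmaj, dmin, rmaj, rmin)
}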
diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
index 5cf6f329f..62e86402e 100644
--- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go
+++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -23,7 +23,7 @@ const (
// webhook backend fails.
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
- // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods
+ // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
@@ -80,7 +80,7 @@ const (
// This annotation can be attached to node.
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
- // annotation key prefix used to identify non-convertible json paths.
+ // NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
kubectlPrefix = "kubectl.kubernetes.io/"
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 5654ee482..9d466c6d7 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -6758,1011 +6758,1015 @@ func init() {
}
var fileDescriptor_6c07b07c062484ab = []byte{
- // 16056 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x79, 0x90, 0x1c, 0xd7,
- 0x79, 0x18, 0xae, 0x9e, 0xd9, 0xf3, 0xdb, 0xfb, 0x2d, 0x8e, 0xc5, 0x12, 0xc0, 0x80, 0x4d, 0x12,
- 0x04, 0xaf, 0x85, 0xc0, 0x43, 0x84, 0x48, 0x8a, 0xe6, 0x9e, 0xc0, 0x12, 0xd8, 0xc5, 0xf0, 0xcd,
- 0x02, 0x90, 0x28, 0x4a, 0x56, 0x63, 0xe6, 0xed, 0x6e, 0x6b, 0x67, 0xba, 0x87, 0xdd, 0x3d, 0x0b,
- 0x2c, 0x7e, 0x72, 0xd9, 0x96, 0x7f, 0x96, 0x2d, 0xd9, 0xa9, 0x52, 0xb9, 0x9c, 0x38, 0x25, 0xbb,
- 0x5c, 0x29, 0xc7, 0xf1, 0x11, 0xc5, 0x4e, 0x14, 0x39, 0xb6, 0x63, 0xf9, 0xca, 0x55, 0x65, 0x27,
- 0x29, 0xc7, 0x71, 0x55, 0x24, 0x57, 0x5c, 0x59, 0x47, 0xeb, 0x54, 0xb9, 0xfc, 0x47, 0x6c, 0x97,
- 0x93, 0x3f, 0x92, 0x2d, 0x27, 0x4e, 0xbd, 0xb3, 0xdf, 0xeb, 0x63, 0x66, 0x16, 0x04, 0x56, 0x14,
- 0x8b, 0xff, 0xcd, 0xbc, 0xef, 0x7b, 0xdf, 0x7b, 0xfd, 0xce, 0xef, 0x7d, 0x27, 0xd8, 0x5b, 0x17,
- 0xc3, 0x19, 0xd7, 0x3f, 0xef, 0x34, 0xdd, 0xf3, 0x55, 0x3f, 0x20, 0xe7, 0xb7, 0x2f, 0x9c, 0xdf,
- 0x20, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x66, 0x9a, 0x81, 0x1f, 0xf9, 0x08, 0x71, 0x9c, 0x19, 0xa7,
- 0xe9, 0xce, 0x50, 0x9c, 0x99, 0xed, 0x0b, 0xd3, 0xcf, 0x6c, 0xb8, 0xd1, 0x66, 0xeb, 0xd6, 0x4c,
- 0xd5, 0x6f, 0x9c, 0xdf, 0xf0, 0x37, 0xfc, 0xf3, 0x0c, 0xf5, 0x56, 0x6b, 0x9d, 0xfd, 0x63, 0x7f,
- 0xd8, 0x2f, 0x4e, 0x62, 0xfa, 0xf9, 0xb8, 0x99, 0x86, 0x53, 0xdd, 0x74, 0x3d, 0x12, 0xec, 0x9c,
- 0x6f, 0x6e, 0x6d, 0xb0, 0x76, 0x03, 0x12, 0xfa, 0xad, 0xa0, 0x4a, 0x92, 0x0d, 0xb7, 0xad, 0x15,
- 0x9e, 0x6f, 0x90, 0xc8, 0xc9, 0xe8, 0xee, 0xf4, 0xf9, 0xbc, 0x5a, 0x41, 0xcb, 0x8b, 0xdc, 0x46,
- 0xba, 0x99, 0x0f, 0x75, 0xaa, 0x10, 0x56, 0x37, 0x49, 0xc3, 0x49, 0xd5, 0x7b, 0x2e, 0xaf, 0x5e,
- 0x2b, 0x72, 0xeb, 0xe7, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x86, 0x05, 0x67, 0x66,
- 0x6f, 0x56, 0x16, 0xeb, 0x4e, 0x18, 0xb9, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, 0xaa, 0x44, 0x7e, 0x40,
- 0x6e, 0xf8, 0xf5, 0x56, 0x83, 0x54, 0xd8, 0x40, 0xa0, 0xa7, 0x61, 0x60, 0x9b, 0xfd, 0x5f, 0x5e,
- 0x98, 0xb2, 0xce, 0x58, 0xe7, 0x06, 0xe7, 0xc6, 0x7f, 0x67, 0xb7, 0xf4, 0x81, 0xbd, 0xdd, 0xd2,
- 0xc0, 0x0d, 0x51, 0x8e, 0x15, 0x06, 0x3a, 0x0b, 0x7d, 0xeb, 0xe1, 0xda, 0x4e, 0x93, 0x4c, 0x15,
- 0x18, 0xee, 0xa8, 0xc0, 0xed, 0x5b, 0xaa, 0xd0, 0x52, 0x2c, 0xa0, 0xe8, 0x3c, 0x0c, 0x36, 0x9d,
- 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8a, 0x67, 0xac, 0x73, 0xbd, 0x73, 0x13, 0x02, 0x75, 0xb0,
- 0x2c, 0x01, 0x38, 0xc6, 0xa1, 0xdd, 0x08, 0x88, 0x53, 0xbb, 0xe6, 0xd5, 0x77, 0xa6, 0x7a, 0xce,
- 0x58, 0xe7, 0x06, 0xe2, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xa5, 0x02, 0x0c, 0xcc, 0xae,
- 0xaf, 0xbb, 0x9e, 0x1b, 0xed, 0xa0, 0x1b, 0x30, 0xec, 0xf9, 0x35, 0x22, 0xff, 0xb3, 0xaf, 0x18,
- 0x7a, 0xf6, 0xcc, 0x4c, 0x7a, 0x29, 0xcd, 0xac, 0x6a, 0x78, 0x73, 0xe3, 0x7b, 0xbb, 0xa5, 0x61,
- 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0xa1, 0xa6, 0x5f, 0x53, 0x64, 0x0b, 0x8c, 0x6c, 0x29, 0x8b,
- 0x6c, 0x39, 0x46, 0x9b, 0x1b, 0xdb, 0xdb, 0x2d, 0x0d, 0x69, 0x05, 0x58, 0x27, 0x82, 0x6e, 0xc1,
- 0x18, 0xfd, 0xeb, 0x45, 0xae, 0xa2, 0x5b, 0x64, 0x74, 0x1f, 0xc9, 0xa3, 0xab, 0xa1, 0xce, 0x4d,
- 0xee, 0xed, 0x96, 0xc6, 0x12, 0x85, 0x38, 0x49, 0xd0, 0xfe, 0x61, 0x0b, 0xc6, 0x66, 0x9b, 0xcd,
- 0xd9, 0xa0, 0xe1, 0x07, 0xe5, 0xc0, 0x5f, 0x77, 0xeb, 0x04, 0xbd, 0x08, 0x3d, 0x11, 0x9d, 0x35,
- 0x3e, 0xc3, 0x8f, 0x88, 0xa1, 0xed, 0xa1, 0x73, 0xb5, 0xbf, 0x5b, 0x9a, 0x4c, 0xa0, 0xb3, 0xa9,
- 0x64, 0x15, 0xd0, 0x6b, 0x30, 0x5e, 0xf7, 0xab, 0x4e, 0x7d, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53,
- 0x7f, 0x64, 0x6f, 0xb7, 0x34, 0x7e, 0x35, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x2e, 0x8c, 0xce, 0x46,
- 0x91, 0x53, 0xdd, 0x24, 0x35, 0xbe, 0xa0, 0xd0, 0xf3, 0xd0, 0xe3, 0x39, 0x0d, 0xd9, 0x99, 0x33,
- 0xb2, 0x33, 0xab, 0x4e, 0x83, 0x76, 0x66, 0xfc, 0xba, 0xe7, 0xbe, 0xdd, 0x12, 0x8b, 0x94, 0x96,
- 0x61, 0x86, 0x8d, 0x9e, 0x05, 0xa8, 0x91, 0x6d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x29, 0xfa, 0x80,
- 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x60, 0x70, 0x76, 0xdb, 0x77, 0x6b, 0x65,
- 0xbf, 0x16, 0xa2, 0x2d, 0x18, 0x6b, 0x06, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xca, 0x3a, 0x53, 0x3c,
- 0x37, 0xf4, 0xec, 0xb9, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x99, 0x3b, 0x2e, 0xda,
- 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xaf, 0x0b, 0x70, 0x74, 0xf6, 0x6e, 0x2b, 0x20, 0x0b,
- 0x6e, 0xb8, 0x95, 0xdc, 0x70, 0x35, 0x37, 0xdc, 0x5a, 0x8d, 0x47, 0x40, 0xad, 0xf4, 0x05, 0x51,
- 0x8e, 0x15, 0x06, 0x7a, 0x06, 0xfa, 0xe9, 0xef, 0xeb, 0x78, 0x59, 0x7c, 0xf2, 0xa4, 0x40, 0x1e,
- 0x5a, 0x70, 0x22, 0x67, 0x81, 0x83, 0xb0, 0xc4, 0x41, 0x2b, 0x30, 0x54, 0x65, 0xe7, 0xc3, 0xc6,
- 0x8a, 0x5f, 0x23, 0x6c, 0x6d, 0x0d, 0xce, 0x3d, 0x45, 0xd1, 0xe7, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2,
- 0x14, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0xbb, 0xf7, 0x30, 0x4a, 0x90,
- 0xb1, 0xd5, 0xcf, 0x69, 0x3b, 0xb7, 0x97, 0xed, 0xdc, 0xe1, 0xec, 0x5d, 0x8b, 0x2e, 0x40, 0xcf,
- 0x96, 0xeb, 0xd5, 0xa6, 0xfa, 0x18, 0xad, 0x53, 0x74, 0xce, 0xaf, 0xb8, 0x5e, 0x6d, 0x7f, 0xb7,
- 0x34, 0x61, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0x0f, 0x0b, 0x4a, 0x0c, 0xb6, 0xe4, 0xd6,
- 0x49, 0x99, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x16, 0x20, 0x24, 0xd5, 0x80,
- 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x51, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02,
- 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x22, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xa7,
- 0xf3, 0x09, 0x7d, 0x04, 0xc6, 0xe2, 0xc6, 0xc2, 0xa6, 0x53, 0x95, 0x03, 0xc8, 0x76, 0x70, 0xc5,
- 0x04, 0xe1, 0x24, 0xae, 0xfd, 0x0f, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xcb, 0xbf, 0xd5, 0xfe,
- 0x55, 0x0b, 0xfa, 0xe7, 0x5c, 0xaf, 0xe6, 0x7a, 0x1b, 0xe8, 0x53, 0x30, 0x40, 0xaf, 0xca, 0x9a,
- 0x13, 0x39, 0xe2, 0x18, 0xfe, 0xa0, 0xb6, 0xb7, 0xd4, 0xcd, 0x35, 0xd3, 0xdc, 0xda, 0xa0, 0x05,
- 0xe1, 0x0c, 0xc5, 0xa6, 0xbb, 0xed, 0xda, 0xad, 0x4f, 0x93, 0x6a, 0xb4, 0x42, 0x22, 0x27, 0xfe,
- 0x9c, 0xb8, 0x0c, 0x2b, 0xaa, 0xe8, 0x0a, 0xf4, 0x45, 0x4e, 0xb0, 0x41, 0x22, 0x71, 0x1e, 0x67,
- 0x9e, 0x9b, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0x78, 0x55, 0x12, 0xdf, 0x52, 0x6b, 0xac, 0x2a, 0x16,
- 0x24, 0xec, 0xff, 0xd3, 0x0f, 0x27, 0xe6, 0x2b, 0xcb, 0x39, 0xeb, 0xea, 0x2c, 0xf4, 0xd5, 0x02,
- 0x77, 0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x05, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x84, 0x61, 0x7e,
- 0x3f, 0x5e, 0x76, 0xbc, 0x5a, 0x7c, 0x3c, 0x0a, 0xec, 0xe1, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0x07,
- 0x5c, 0x54, 0x67, 0x13, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xbc, 0x05, 0xe3, 0xbc, 0x99, 0xd9, 0x28,
- 0x0a, 0xdc, 0x5b, 0xad, 0x88, 0x84, 0x53, 0xbd, 0xec, 0xa4, 0x9b, 0xcf, 0x1a, 0xad, 0xdc, 0x11,
- 0x98, 0xb9, 0x91, 0xa0, 0xc2, 0x0f, 0xc1, 0x29, 0xd1, 0xee, 0x78, 0x12, 0x8c, 0x53, 0xcd, 0xa2,
- 0xef, 0xb3, 0x60, 0xba, 0xea, 0x7b, 0x51, 0xe0, 0xd7, 0xeb, 0x24, 0x28, 0xb7, 0x6e, 0xd5, 0xdd,
- 0x70, 0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xe9,
- 0xbd, 0xdd, 0xd2, 0xf4, 0x7c, 0x2e, 0x29, 0xdc, 0xa6, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0x95,
- 0xc8, 0xd9, 0x20, 0x71, 0xe3, 0xfd, 0xdd, 0x37, 0x7e, 0x6c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24,
- 0x70, 0x06, 0x59, 0xf4, 0x36, 0x1c, 0xa1, 0xa5, 0xa9, 0x6f, 0x1d, 0xe8, 0xbe, 0xb9, 0xa9, 0xbd,
- 0xdd, 0xd2, 0x91, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x1e, 0x0b, 0x4e, 0xc4, 0x9f, 0xbf,
- 0x78, 0xa7, 0xe9, 0x78, 0xb5, 0xb8, 0xe1, 0xc1, 0xee, 0x1b, 0xa6, 0x67, 0xf2, 0x89, 0xf9, 0x3c,
- 0x4a, 0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x24, 0xed, 0x5a, 0xb2, 0x6d, 0xe8, 0xbe, 0xed, 0xe3, 0x7b,
- 0xbb, 0xa5, 0xc9, 0xd5, 0x34, 0x0d, 0x9c, 0x45, 0x78, 0x7a, 0x1e, 0x8e, 0x66, 0xae, 0x4e, 0x34,
- 0x0e, 0xc5, 0x2d, 0xc2, 0x99, 0xc0, 0x41, 0x4c, 0x7f, 0xa2, 0x23, 0xd0, 0xbb, 0xed, 0xd4, 0x5b,
- 0x62, 0x63, 0x62, 0xfe, 0xe7, 0xa5, 0xc2, 0x45, 0xcb, 0xfe, 0x37, 0x45, 0x18, 0x9b, 0xaf, 0x2c,
- 0xdf, 0xd3, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, 0x97, 0xe8,
- 0x77, 0x67, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9c, 0xb3, 0x65, 0xef, 0xf3, 0x46, 0xdd, 0xce,
- 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x03, 0x2e, 0xdd,
- 0xfb, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x2d, 0xb7, 0xee, 0x46, 0x2e, 0x09, 0xd1,
- 0xe3, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdd, 0xdb, 0x2d, 0x15, 0x67, 0x6b,
- 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x83, 0x29, 0x06, 0x7a, 0x12, 0x7a, 0x6a, 0x81, 0xdf, 0x9c, 0x2a,
- 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x6f, 0x17, 0xe0,
- 0xe4, 0x3c, 0x69, 0x6e, 0x2e, 0x55, 0x72, 0xee, 0x8b, 0x73, 0x30, 0xd0, 0xf0, 0x3d, 0x37, 0xf2,
- 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x40, 0x4f, 0x33, 0x66,
- 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, 0xc5, 0x28,
- 0x8c, 0xeb, 0x21, 0x09, 0x30, 0x83, 0xc4, 0x9c, 0x00, 0xe5, 0x11, 0xc4, 0x8d, 0x90, 0xe0, 0x04,
- 0x28, 0x04, 0x6b, 0x58, 0xa8, 0x0c, 0x83, 0x61, 0x62, 0x66, 0xbb, 0xda, 0x9a, 0x23, 0x8c, 0x55,
- 0x50, 0x33, 0x19, 0x13, 0x31, 0x6e, 0xb0, 0xbe, 0x8e, 0xac, 0xc2, 0xd7, 0x0a, 0x80, 0xf8, 0x10,
- 0x7e, 0x9b, 0x0d, 0xdc, 0xf5, 0xf4, 0xc0, 0x75, 0xbf, 0x25, 0xee, 0xd7, 0xe8, 0xfd, 0x4f, 0x0b,
- 0x4e, 0xce, 0xbb, 0x5e, 0x8d, 0x04, 0x39, 0x0b, 0xf0, 0xc1, 0x3c, 0xe5, 0x0f, 0xc6, 0xa4, 0x18,
- 0x4b, 0xac, 0xe7, 0x3e, 0x2c, 0x31, 0xfb, 0x2f, 0x2c, 0x40, 0xfc, 0xb3, 0xdf, 0x75, 0x1f, 0x7b,
- 0x3d, 0xfd, 0xb1, 0xf7, 0x61, 0x59, 0xd8, 0x57, 0x61, 0x74, 0xbe, 0xee, 0x12, 0x2f, 0x5a, 0x2e,
- 0xcf, 0xfb, 0xde, 0xba, 0xbb, 0x81, 0x5e, 0x82, 0xd1, 0xc8, 0x6d, 0x10, 0xbf, 0x15, 0x55, 0x48,
- 0xd5, 0xf7, 0xd8, 0xcb, 0xd5, 0x3a, 0xd7, 0x3b, 0x87, 0xf6, 0x76, 0x4b, 0xa3, 0x6b, 0x06, 0x04,
- 0x27, 0x30, 0xed, 0x9f, 0xa1, 0xe7, 0x56, 0xbd, 0x15, 0x46, 0x24, 0x58, 0x0b, 0x5a, 0x61, 0x34,
- 0xd7, 0xa2, 0xbc, 0x67, 0x39, 0xf0, 0x69, 0x77, 0x5c, 0xdf, 0x43, 0x27, 0x8d, 0xe7, 0xf8, 0x80,
- 0x7c, 0x8a, 0x8b, 0x67, 0xf7, 0x0c, 0x40, 0xe8, 0x6e, 0x78, 0x24, 0xd0, 0x9e, 0x0f, 0xa3, 0x6c,
- 0xab, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x3a, 0x8c, 0xd4, 0x9d, 0x5b, 0xa4, 0x5e, 0x21, 0x75, 0x52,
- 0x8d, 0xfc, 0x40, 0xc8, 0x37, 0x9e, 0xeb, 0xee, 0x1d, 0x70, 0x55, 0xaf, 0x3a, 0x37, 0xb1, 0xb7,
- 0x5b, 0x1a, 0x31, 0x8a, 0xb0, 0x49, 0x9c, 0x1e, 0x1d, 0x7e, 0x93, 0x7e, 0x85, 0x53, 0xd7, 0x1f,
- 0x9f, 0xd7, 0x44, 0x19, 0x56, 0x50, 0x75, 0x74, 0xf4, 0xe4, 0x1d, 0x1d, 0xf6, 0x1f, 0xd1, 0x85,
- 0xe6, 0x37, 0x9a, 0xbe, 0x47, 0xbc, 0x68, 0xde, 0xf7, 0x6a, 0x5c, 0x32, 0xf5, 0x92, 0x21, 0x3a,
- 0x39, 0x9b, 0x10, 0x9d, 0x1c, 0x4b, 0xd7, 0xd0, 0xa4, 0x27, 0x1f, 0x86, 0xbe, 0x30, 0x72, 0xa2,
- 0x56, 0x28, 0x06, 0xee, 0x61, 0xb9, 0xec, 0x2a, 0xac, 0x74, 0x7f, 0xb7, 0x34, 0xa6, 0xaa, 0xf1,
- 0x22, 0x2c, 0x2a, 0xa0, 0x27, 0xa0, 0xbf, 0x41, 0xc2, 0xd0, 0xd9, 0x90, 0x6c, 0xc3, 0x98, 0xa8,
- 0xdb, 0xbf, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0x47, 0xa0, 0x97, 0x04, 0x81, 0x1f, 0x88, 0x6f, 0x1b,
- 0x11, 0x88, 0xbd, 0x8b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, 0x18, 0x53, 0x7d, 0xe5, 0x6d,
- 0x1d, 0xc2, 0x73, 0xed, 0x4d, 0x80, 0xaa, 0xfc, 0xc0, 0x90, 0x5d, 0xb3, 0x43, 0xcf, 0x9e, 0xcd,
- 0xe4, 0x68, 0x52, 0xc3, 0x18, 0x53, 0x56, 0x45, 0x21, 0xd6, 0xa8, 0xd9, 0xbf, 0x61, 0xc1, 0x64,
- 0xe2, 0x8b, 0xae, 0xba, 0x61, 0x84, 0xde, 0x4a, 0x7d, 0xd5, 0x4c, 0x97, 0x8b, 0xcf, 0x0d, 0xf9,
- 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x0c, 0xbd, 0x6e, 0x44, 0x1a, 0xf2, 0x63, 0x1e,
- 0x69, 0xfb, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0x2c, 0xd3, 0x9a, 0x98, 0x13, 0xb0, 0x7f, 0xbb, 0x08,
- 0x83, 0x7c, 0x7f, 0xaf, 0x38, 0xcd, 0x43, 0x98, 0x8b, 0xa7, 0x60, 0xd0, 0x6d, 0x34, 0x5a, 0x91,
- 0x73, 0x4b, 0xdc, 0x7b, 0x03, 0xfc, 0x0c, 0x5a, 0x96, 0x85, 0x38, 0x86, 0xa3, 0x65, 0xe8, 0x61,
- 0x5d, 0xe1, 0x5f, 0xf9, 0x78, 0xf6, 0x57, 0x8a, 0xbe, 0xcf, 0x2c, 0x38, 0x91, 0xc3, 0x59, 0x4e,
- 0xb5, 0xaf, 0x68, 0x11, 0x66, 0x24, 0x90, 0x03, 0x70, 0xcb, 0xf5, 0x9c, 0x60, 0x87, 0x96, 0x4d,
- 0x15, 0x19, 0xc1, 0x67, 0xda, 0x13, 0x9c, 0x53, 0xf8, 0x9c, 0xac, 0xfa, 0xb0, 0x18, 0x80, 0x35,
- 0xa2, 0xd3, 0x2f, 0xc2, 0xa0, 0x42, 0x3e, 0x08, 0xe7, 0x38, 0xfd, 0x11, 0x18, 0x4b, 0xb4, 0xd5,
- 0xa9, 0xfa, 0xb0, 0xce, 0x78, 0xfe, 0x1a, 0x3b, 0x32, 0x44, 0xaf, 0x17, 0xbd, 0x6d, 0x71, 0x37,
- 0xdd, 0x85, 0x23, 0xf5, 0x8c, 0x23, 0x5f, 0xcc, 0x6b, 0xf7, 0x57, 0xc4, 0x49, 0xf1, 0xd9, 0x47,
- 0xb2, 0xa0, 0x38, 0xb3, 0x0d, 0xe3, 0x44, 0x2c, 0xb4, 0x3b, 0x11, 0xe9, 0x79, 0x77, 0x44, 0x75,
- 0xfe, 0x0a, 0xd9, 0x51, 0x87, 0xea, 0xb7, 0xb2, 0xfb, 0xa7, 0xf8, 0xe8, 0xf3, 0xe3, 0x72, 0x48,
- 0x10, 0x28, 0x5e, 0x21, 0x3b, 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0xb6, 0xfd, 0xba, 0xaf, 0x58, 0x30,
- 0xa2, 0xbe, 0xee, 0x10, 0xce, 0x85, 0x39, 0xf3, 0x5c, 0x38, 0xd5, 0x76, 0x81, 0xe7, 0x9c, 0x08,
- 0x5f, 0x2b, 0xc0, 0x09, 0x85, 0x43, 0x1f, 0x51, 0xfc, 0x8f, 0x58, 0x55, 0xe7, 0x61, 0xd0, 0x53,
- 0xe2, 0x44, 0xcb, 0x94, 0xe3, 0xc5, 0xc2, 0xc4, 0x18, 0x87, 0x5e, 0x79, 0x5e, 0x7c, 0x69, 0x0f,
- 0xeb, 0x72, 0x76, 0x71, 0xb9, 0xcf, 0x41, 0xb1, 0xe5, 0xd6, 0xc4, 0x05, 0xf3, 0x41, 0x39, 0xda,
- 0xd7, 0x97, 0x17, 0xf6, 0x77, 0x4b, 0x0f, 0xe7, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0x5c, 0x5f,
- 0x5e, 0xc0, 0xb4, 0x32, 0x9a, 0x85, 0x31, 0xa9, 0x55, 0xbb, 0x41, 0xf9, 0x52, 0xdf, 0x13, 0xf7,
- 0x90, 0x12, 0x96, 0x63, 0x13, 0x8c, 0x93, 0xf8, 0x68, 0x01, 0xc6, 0xb7, 0x5a, 0xb7, 0x48, 0x9d,
- 0x44, 0xfc, 0x83, 0xaf, 0x10, 0x2e, 0x4a, 0x1e, 0x8c, 0x9f, 0xb0, 0x57, 0x12, 0x70, 0x9c, 0xaa,
- 0x61, 0xff, 0x0d, 0xbb, 0x0f, 0xc4, 0xe8, 0x69, 0xfc, 0xcd, 0xb7, 0x72, 0x39, 0x77, 0xb3, 0x2a,
- 0xae, 0x90, 0x9d, 0x35, 0x9f, 0xf2, 0x21, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x3d, 0x6d, 0xd7, 0xfc,
- 0x2f, 0x15, 0xe0, 0xa8, 0x1a, 0x01, 0x83, 0x5b, 0xfe, 0x76, 0x1f, 0x83, 0x0b, 0x30, 0x54, 0x23,
- 0xeb, 0x4e, 0xab, 0x1e, 0x29, 0xbd, 0x46, 0x2f, 0x57, 0xb5, 0x2d, 0xc4, 0xc5, 0x58, 0xc7, 0x39,
- 0xc0, 0xb0, 0xfd, 0xc2, 0x08, 0xbb, 0x88, 0x23, 0x87, 0xae, 0x71, 0xb5, 0x6b, 0xac, 0xdc, 0x5d,
- 0xf3, 0x08, 0xf4, 0xba, 0x0d, 0xca, 0x98, 0x15, 0x4c, 0x7e, 0x6b, 0x99, 0x16, 0x62, 0x0e, 0x43,
- 0x8f, 0x41, 0x7f, 0xd5, 0x6f, 0x34, 0x1c, 0xaf, 0xc6, 0xae, 0xbc, 0xc1, 0xb9, 0x21, 0xca, 0xbb,
- 0xcd, 0xf3, 0x22, 0x2c, 0x61, 0x94, 0xf9, 0x76, 0x82, 0x0d, 0x2e, 0xec, 0x11, 0xcc, 0xf7, 0x6c,
- 0xb0, 0x11, 0x62, 0x56, 0x4a, 0xdf, 0xaa, 0xb7, 0xfd, 0x60, 0xcb, 0xf5, 0x36, 0x16, 0xdc, 0x40,
- 0x6c, 0x09, 0x75, 0x17, 0xde, 0x54, 0x10, 0xac, 0x61, 0xa1, 0x25, 0xe8, 0x6d, 0xfa, 0x41, 0x14,
- 0x4e, 0xf5, 0xb1, 0xe1, 0x7e, 0x38, 0xe7, 0x20, 0xe2, 0x5f, 0x5b, 0xf6, 0x83, 0x28, 0xfe, 0x00,
- 0xfa, 0x2f, 0xc4, 0xbc, 0x3a, 0xba, 0x0a, 0xfd, 0xc4, 0xdb, 0x5e, 0x0a, 0xfc, 0xc6, 0xd4, 0x64,
- 0x3e, 0xa5, 0x45, 0x8e, 0xc2, 0x97, 0x59, 0xcc, 0xa3, 0x8a, 0x62, 0x2c, 0x49, 0xa0, 0x0f, 0x43,
- 0x91, 0x78, 0xdb, 0x53, 0xfd, 0x8c, 0xd2, 0x74, 0x0e, 0xa5, 0x1b, 0x4e, 0x10, 0x9f, 0xf9, 0x8b,
- 0xde, 0x36, 0xa6, 0x75, 0xd0, 0xc7, 0x60, 0x50, 0x1e, 0x18, 0xa1, 0x90, 0xa2, 0x66, 0x2e, 0x58,
- 0x79, 0xcc, 0x60, 0xf2, 0x76, 0xcb, 0x0d, 0x48, 0x83, 0x78, 0x51, 0x18, 0x9f, 0x90, 0x12, 0x1a,
- 0xe2, 0x98, 0x1a, 0xaa, 0xc2, 0x70, 0x40, 0x42, 0xf7, 0x2e, 0x29, 0xfb, 0x75, 0xb7, 0xba, 0x33,
- 0x75, 0x9c, 0x75, 0xef, 0x89, 0xb6, 0x43, 0x86, 0xb5, 0x0a, 0xb1, 0x94, 0x5f, 0x2f, 0xc5, 0x06,
- 0x51, 0xf4, 0x06, 0x8c, 0x04, 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xad, 0x4c, 0x29, 0xad, 0xdc, 0x08,
- 0xd6, 0x01, 0xfc, 0x39, 0x11, 0x37, 0x13, 0x43, 0xb0, 0x49, 0x01, 0x7d, 0x4c, 0xaa, 0x1c, 0x56,
- 0xfc, 0x96, 0x17, 0x85, 0x53, 0x83, 0xac, 0xdf, 0x99, 0xba, 0xe9, 0x1b, 0x31, 0x5e, 0x52, 0x27,
- 0xc1, 0x2b, 0x63, 0x83, 0x14, 0xfa, 0x04, 0x8c, 0xf0, 0xff, 0x5c, 0xa5, 0x1a, 0x4e, 0x1d, 0x65,
- 0xb4, 0xcf, 0xe4, 0xd3, 0xe6, 0x88, 0x73, 0x47, 0x05, 0xf1, 0x11, 0xbd, 0x34, 0xc4, 0x26, 0x35,
- 0x84, 0x61, 0xa4, 0xee, 0x6e, 0x13, 0x8f, 0x84, 0x61, 0x39, 0xf0, 0x6f, 0x11, 0x21, 0x21, 0x3e,
- 0x91, 0xad, 0x82, 0xf5, 0x6f, 0x11, 0xf1, 0x08, 0xd4, 0xeb, 0x60, 0x93, 0x04, 0xba, 0x0e, 0xa3,
- 0xf4, 0x49, 0xee, 0xc6, 0x44, 0x87, 0x3a, 0x11, 0x65, 0x0f, 0x67, 0x6c, 0x54, 0xc2, 0x09, 0x22,
- 0xe8, 0x1a, 0x0c, 0xb3, 0x31, 0x6f, 0x35, 0x39, 0xd1, 0x63, 0x9d, 0x88, 0x32, 0x83, 0x82, 0x8a,
- 0x56, 0x05, 0x1b, 0x04, 0xd0, 0xeb, 0x30, 0x58, 0x77, 0xd7, 0x49, 0x75, 0xa7, 0x5a, 0x27, 0x53,
- 0xc3, 0x8c, 0x5a, 0xe6, 0x61, 0x78, 0x55, 0x22, 0x71, 0xfe, 0x5c, 0xfd, 0xc5, 0x71, 0x75, 0x74,
- 0x03, 0x8e, 0x45, 0x24, 0x68, 0xb8, 0x9e, 0x43, 0x0f, 0x31, 0xf1, 0x24, 0x64, 0x9a, 0xf1, 0x11,
- 0xb6, 0xba, 0x4e, 0x8b, 0xd9, 0x38, 0xb6, 0x96, 0x89, 0x85, 0x73, 0x6a, 0xa3, 0x3b, 0x30, 0x95,
- 0x01, 0xe1, 0xeb, 0xf6, 0x08, 0xa3, 0xfc, 0x8a, 0xa0, 0x3c, 0xb5, 0x96, 0x83, 0xb7, 0xdf, 0x06,
- 0x86, 0x73, 0xa9, 0xa3, 0x6b, 0x30, 0xc6, 0x4e, 0xce, 0x72, 0xab, 0x5e, 0x17, 0x0d, 0x8e, 0xb2,
- 0x06, 0x1f, 0x93, 0x7c, 0xc4, 0xb2, 0x09, 0xde, 0xdf, 0x2d, 0x41, 0xfc, 0x0f, 0x27, 0x6b, 0xa3,
- 0x5b, 0x4c, 0x09, 0xdb, 0x0a, 0xdc, 0x68, 0x87, 0xee, 0x2a, 0x72, 0x27, 0x9a, 0x1a, 0x6b, 0x2b,
- 0x90, 0xd2, 0x51, 0x95, 0xa6, 0x56, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x2a, 0x08, 0xa3, 0x9a, 0xeb,
- 0x4d, 0x8d, 0xf3, 0xf7, 0x94, 0x3c, 0x49, 0x2b, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xc0, 0xd2, 0x1f,
- 0xd7, 0xe8, 0x8d, 0x3b, 0xc1, 0x10, 0x63, 0x05, 0xac, 0x04, 0xe0, 0x18, 0x87, 0x32, 0xc1, 0x51,
- 0xb4, 0x33, 0x85, 0x18, 0xaa, 0x3a, 0x10, 0xd7, 0xd6, 0x3e, 0x86, 0x69, 0xb9, 0x7d, 0x0b, 0x46,
- 0xd5, 0x31, 0xc1, 0xc6, 0x04, 0x95, 0xa0, 0x97, 0xb1, 0x7d, 0x42, 0x7c, 0x3a, 0x48, 0xbb, 0xc0,
- 0x58, 0x42, 0xcc, 0xcb, 0x59, 0x17, 0xdc, 0xbb, 0x64, 0x6e, 0x27, 0x22, 0x5c, 0x16, 0x51, 0xd4,
- 0xba, 0x20, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x2f, 0x67, 0x9f, 0xe3, 0x5b, 0xa2, 0x8b, 0x7b, 0xf1,
- 0x69, 0x18, 0x60, 0x86, 0x1f, 0x7e, 0xc0, 0xb5, 0xb3, 0xbd, 0x31, 0xc3, 0x7c, 0x59, 0x94, 0x63,
- 0x85, 0x81, 0x5e, 0x86, 0x91, 0xaa, 0xde, 0x80, 0xb8, 0xd4, 0xd5, 0x31, 0x62, 0xb4, 0x8e, 0x4d,
- 0x5c, 0x74, 0x11, 0x06, 0x98, 0x8d, 0x53, 0xd5, 0xaf, 0x0b, 0x6e, 0x53, 0x72, 0x26, 0x03, 0x65,
- 0x51, 0xbe, 0xaf, 0xfd, 0xc6, 0x0a, 0x1b, 0x9d, 0x85, 0x3e, 0xda, 0x85, 0xe5, 0xb2, 0xb8, 0x4e,
- 0x95, 0x24, 0xf0, 0x32, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0x86, 0xc5, 0x78, 0xa9, 0xf4, 0x99, 0x8f,
- 0x2e, 0xb3, 0x4b, 0x83, 0xdd, 0x20, 0x9a, 0x16, 0xfe, 0x51, 0xed, 0x26, 0x50, 0xb0, 0xfd, 0xc4,
- 0x7f, 0x6c, 0xd4, 0x44, 0x6f, 0x26, 0x6f, 0x06, 0xce, 0x50, 0x3c, 0x2f, 0x87, 0x20, 0x79, 0x3b,
- 0x3c, 0x14, 0x5f, 0x71, 0xb4, 0x3f, 0xed, 0xae, 0x08, 0xfb, 0x47, 0x0a, 0xda, 0x2a, 0xa9, 0x44,
- 0x4e, 0x44, 0x50, 0x19, 0xfa, 0x6f, 0x3b, 0x6e, 0xe4, 0x7a, 0x1b, 0x82, 0xef, 0x6b, 0x7f, 0xd1,
- 0xb1, 0x4a, 0x37, 0x79, 0x05, 0xce, 0xbd, 0x88, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0x96, 0xe7,
- 0x51, 0x8a, 0x85, 0x6e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0xde, 0x02,
- 0x90, 0x27, 0x04, 0xa9, 0x09, 0xd9, 0xe1, 0xd3, 0x9d, 0x89, 0xae, 0xa9, 0x3a, 0x5c, 0x38, 0x19,
- 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0xe6, 0x54, 0xef, 0x0c, 0xfa, 0x38, 0xdd, 0xa2, 0x4e, 0x10,
- 0x91, 0xda, 0x6c, 0x24, 0x06, 0xe7, 0xc9, 0xee, 0x1e, 0x87, 0x6b, 0x6e, 0x83, 0xe8, 0xdb, 0x59,
- 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x57, 0x8a, 0x30, 0x95, 0xd7, 0x5d, 0xba, 0x69, 0xc8, 0x1d, 0x37,
- 0x9a, 0xa7, 0x6c, 0xad, 0x65, 0x6e, 0x9a, 0x45, 0x51, 0x8e, 0x15, 0x06, 0x5d, 0xbd, 0xa1, 0xbb,
- 0x21, 0xdf, 0xf6, 0xbd, 0xf1, 0xea, 0xad, 0xb0, 0x52, 0x2c, 0xa0, 0x14, 0x2f, 0x20, 0x4e, 0x28,
- 0x8c, 0xef, 0xb4, 0x55, 0x8e, 0x59, 0x29, 0x16, 0x50, 0x5d, 0xca, 0xd8, 0xd3, 0x41, 0xca, 0x68,
- 0x0c, 0x51, 0xef, 0xfd, 0x1d, 0x22, 0xf4, 0x49, 0x80, 0x75, 0xd7, 0x73, 0xc3, 0x4d, 0x46, 0xbd,
- 0xef, 0xc0, 0xd4, 0x15, 0x53, 0xbc, 0xa4, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x01, 0x86, 0xd4, 0x01,
- 0xb2, 0xbc, 0xc0, 0x54, 0xff, 0x9a, 0x29, 0x55, 0x7c, 0x9a, 0x2e, 0x60, 0x1d, 0xcf, 0xfe, 0x74,
- 0x72, 0xbd, 0x88, 0x1d, 0xa0, 0x8d, 0xaf, 0xd5, 0xed, 0xf8, 0x16, 0xda, 0x8f, 0xaf, 0xfd, 0x63,
- 0x83, 0x30, 0x66, 0x34, 0xd6, 0x0a, 0xbb, 0x38, 0x73, 0x2f, 0xd1, 0x0b, 0xc8, 0x89, 0x88, 0xd8,
- 0x7f, 0x76, 0xe7, 0xad, 0xa2, 0x5f, 0x52, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x93, 0x30, 0x58, 0x77,
- 0x42, 0x26, 0xb1, 0x24, 0x62, 0xdf, 0x75, 0x43, 0x2c, 0x7e, 0x10, 0x3a, 0x61, 0xa4, 0xdd, 0xfa,
- 0x9c, 0x76, 0x4c, 0x92, 0xde, 0x94, 0x94, 0xbf, 0x92, 0xd6, 0x9d, 0xaa, 0x13, 0x94, 0x09, 0xdb,
- 0xc1, 0x1c, 0x86, 0x2e, 0xb2, 0xa3, 0x95, 0xae, 0x8a, 0x79, 0xca, 0x8d, 0xb2, 0x65, 0xd6, 0x6b,
- 0x30, 0xd9, 0x0a, 0x86, 0x0d, 0xcc, 0xf8, 0x4d, 0xd6, 0xd7, 0xe6, 0x4d, 0xf6, 0x04, 0xf4, 0xb3,
- 0x1f, 0x6a, 0x05, 0xa8, 0xd9, 0x58, 0xe6, 0xc5, 0x58, 0xc2, 0x93, 0x0b, 0x66, 0xa0, 0xbb, 0x05,
- 0x43, 0x5f, 0x7d, 0x62, 0x51, 0x33, 0xb3, 0x8b, 0x01, 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0x0c,
- 0xfd, 0xac, 0x05, 0xc8, 0xa9, 0xd3, 0xd7, 0x32, 0x2d, 0x56, 0x8f, 0x1b, 0x60, 0xac, 0xf6, 0xcb,
- 0x1d, 0x87, 0xbd, 0x15, 0xce, 0xcc, 0xa6, 0x6a, 0x73, 0x49, 0xe9, 0x4b, 0xa2, 0x8b, 0x28, 0x8d,
- 0xa0, 0x5f, 0x46, 0x57, 0xdd, 0x30, 0xfa, 0xec, 0x1f, 0x27, 0x2e, 0xa7, 0x8c, 0x2e, 0xa1, 0xeb,
- 0xfa, 0xe3, 0x6b, 0xe8, 0x80, 0x8f, 0xaf, 0x91, 0xdc, 0x87, 0xd7, 0x77, 0x26, 0x1e, 0x30, 0xc3,
- 0xec, 0xcb, 0x1f, 0xeb, 0xf0, 0x80, 0x11, 0xe2, 0xf4, 0x6e, 0x9e, 0x31, 0x65, 0xa1, 0x07, 0x1e,
- 0x61, 0x5d, 0x6e, 0xff, 0x08, 0xbe, 0x1e, 0x92, 0x60, 0xee, 0x84, 0x54, 0x13, 0xef, 0xeb, 0xbc,
- 0x87, 0xa6, 0x37, 0xfe, 0x1e, 0x0b, 0xa6, 0xd2, 0x03, 0xc4, 0xbb, 0x34, 0x35, 0xca, 0xfa, 0x6f,
- 0xb7, 0x1b, 0x19, 0xd1, 0x79, 0x69, 0xee, 0x3a, 0x35, 0x9b, 0x43, 0x0b, 0xe7, 0xb6, 0x32, 0xdd,
- 0x82, 0xe3, 0x39, 0xf3, 0x9e, 0x21, 0xb5, 0x5e, 0xd0, 0xa5, 0xd6, 0x1d, 0x64, 0x9d, 0x33, 0x72,
- 0x66, 0x66, 0xde, 0x68, 0x39, 0x5e, 0xe4, 0x46, 0x3b, 0xba, 0x94, 0xdb, 0x03, 0x73, 0x40, 0xd0,
- 0x27, 0xa0, 0xb7, 0xee, 0x7a, 0xad, 0x3b, 0xe2, 0xa6, 0x3c, 0x9b, 0xfd, 0x88, 0xf1, 0x5a, 0x77,
- 0xcc, 0x21, 0x2e, 0xd1, 0x0d, 0xc9, 0xca, 0xf7, 0x77, 0x4b, 0x28, 0x8d, 0x80, 0x39, 0x55, 0xfb,
- 0x49, 0x18, 0x5d, 0x70, 0x48, 0xc3, 0xf7, 0x16, 0xbd, 0x5a, 0xd3, 0x77, 0xbd, 0x08, 0x4d, 0x41,
- 0x0f, 0x63, 0x11, 0xf9, 0x05, 0xd9, 0x43, 0x87, 0x10, 0xb3, 0x12, 0x7b, 0x03, 0x8e, 0x2e, 0xf8,
- 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9b, 0x2d, 0x2f, 0x6b, 0x52, 0xbf, 0x55, 0x29, 0x75, 0xb2, 0xf2,
- 0xdf, 0xf4, 0x5a, 0x4d, 0xbe, 0x94, 0x96, 0xdc, 0x3a, 0xc9, 0x91, 0xcd, 0xfe, 0x58, 0xc1, 0x68,
- 0x29, 0xc6, 0x57, 0x9a, 0x45, 0x2b, 0xd7, 0x28, 0xe1, 0x0d, 0x18, 0x58, 0x77, 0x49, 0xbd, 0x86,
- 0xc9, 0xba, 0x98, 0x8d, 0xc7, 0xf3, 0xcd, 0x16, 0x97, 0x28, 0xa6, 0x52, 0x81, 0x32, 0x99, 0xd5,
- 0x92, 0xa8, 0x8c, 0x15, 0x19, 0xb4, 0x05, 0xe3, 0x72, 0xce, 0x24, 0x54, 0x9c, 0xda, 0x4f, 0xb4,
- 0x5b, 0x84, 0x26, 0x71, 0x66, 0xc2, 0x8d, 0x13, 0x64, 0x70, 0x8a, 0x30, 0x3a, 0x09, 0x3d, 0x0d,
- 0xca, 0x9f, 0xf4, 0xb0, 0xe1, 0x67, 0x42, 0x2a, 0x26, 0x6f, 0x63, 0xa5, 0xf6, 0x4f, 0x58, 0x70,
- 0x3c, 0x35, 0x32, 0x42, 0xee, 0x78, 0x9f, 0x67, 0x21, 0x29, 0x07, 0x2c, 0x74, 0x96, 0x03, 0xda,
- 0xff, 0xc8, 0x82, 0x23, 0x8b, 0x8d, 0x66, 0xb4, 0xb3, 0xe0, 0x9a, 0x16, 0x04, 0x2f, 0x42, 0x5f,
- 0x83, 0xd4, 0xdc, 0x56, 0x43, 0xcc, 0x5c, 0x49, 0xde, 0xe1, 0x2b, 0xac, 0x94, 0x9e, 0x03, 0x95,
- 0xc8, 0x0f, 0x9c, 0x0d, 0xc2, 0x0b, 0xb0, 0x40, 0x67, 0x9c, 0x90, 0x7b, 0x97, 0x5c, 0x75, 0x1b,
- 0x6e, 0x74, 0x6f, 0xbb, 0x4b, 0x28, 0xff, 0x25, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0x98,
- 0x5c, 0xf7, 0xb3, 0xb5, 0x5a, 0x40, 0xc2, 0x10, 0x4d, 0x43, 0xc1, 0x6d, 0x8a, 0x5e, 0x82, 0xe8,
- 0x65, 0x61, 0xb9, 0x8c, 0x0b, 0x6e, 0x53, 0x3e, 0xba, 0x18, 0x9b, 0x50, 0x34, 0xed, 0x20, 0x2e,
- 0x8b, 0x72, 0xac, 0x30, 0xd0, 0x39, 0x18, 0xf0, 0xfc, 0x1a, 0x7f, 0xb7, 0x08, 0x4d, 0x38, 0xc5,
- 0x5c, 0x15, 0x65, 0x58, 0x41, 0x51, 0x19, 0x06, 0xb9, 0x95, 0x6c, 0xbc, 0x68, 0xbb, 0xb2, 0xb5,
- 0x65, 0x5f, 0xb6, 0x26, 0x6b, 0xe2, 0x98, 0x88, 0xfd, 0x5b, 0x16, 0x0c, 0xcb, 0x2f, 0xeb, 0xf2,
- 0x45, 0x49, 0xb7, 0x56, 0xfc, 0x9a, 0x8c, 0xb7, 0x16, 0x7d, 0x11, 0x32, 0x88, 0xf1, 0x10, 0x2c,
- 0x1e, 0xe8, 0x21, 0x78, 0x01, 0x86, 0x9c, 0x66, 0xb3, 0x6c, 0xbe, 0x22, 0xd9, 0x52, 0x9a, 0x8d,
- 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x78, 0x01, 0x46, 0xe5, 0x17, 0x54, 0x5a, 0xb7, 0x42, 0x12, 0xa1,
- 0x35, 0x18, 0x74, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x1f, 0xc9, 0x96, 0x6e, 0x1a, 0x53, 0x1a, 0xb3,
- 0xc3, 0xb3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x3a, 0x4c, 0x78, 0x7e, 0xc4, 0x58, 0x23, 0x05, 0x6f,
- 0xa7, 0x70, 0x4e, 0x52, 0x3f, 0x21, 0xa8, 0x4f, 0xac, 0x26, 0xa9, 0xe0, 0x34, 0x61, 0xb4, 0x28,
- 0x25, 0xc6, 0xc5, 0x7c, 0x51, 0x9f, 0x3e, 0x71, 0xd9, 0x02, 0x63, 0xfb, 0xd7, 0x2d, 0x18, 0x94,
- 0x68, 0x87, 0x61, 0x5b, 0xb0, 0x02, 0xfd, 0x21, 0x9b, 0x04, 0x39, 0x34, 0x76, 0xbb, 0x8e, 0xf3,
- 0xf9, 0x8a, 0x39, 0x3e, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0x53, 0x18, 0xaa, 0xee, 0xbf, 0x4b, 0x14,
- 0x86, 0xaa, 0x3f, 0x39, 0x97, 0xd2, 0x9f, 0xb2, 0x3e, 0x6b, 0x12, 0x78, 0xfa, 0x30, 0x69, 0x06,
- 0x64, 0xdd, 0xbd, 0x93, 0x7c, 0x98, 0x94, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x16, 0x0c, 0x57, 0xa5,
- 0xa6, 0x28, 0xde, 0xe1, 0x67, 0xdb, 0x6a, 0x2d, 0x95, 0x82, 0x9b, 0x4b, 0x3a, 0xe7, 0xb5, 0xfa,
- 0xd8, 0xa0, 0x66, 0x5a, 0x81, 0x15, 0x3b, 0x59, 0x81, 0xc5, 0x74, 0xf3, 0x6d, 0xa2, 0x7e, 0xd2,
- 0x82, 0x3e, 0xae, 0x21, 0xe8, 0x4e, 0x41, 0xa3, 0xe9, 0xfb, 0xe3, 0xb1, 0xbb, 0x41, 0x0b, 0x05,
- 0x67, 0x83, 0x56, 0x60, 0x90, 0xfd, 0x60, 0x1a, 0x8e, 0x62, 0xbe, 0xcf, 0x18, 0x6f, 0x55, 0xef,
- 0xe0, 0x0d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x1f, 0x2d, 0xd2, 0xd3, 0x2d, 0x46, 0x35, 0x2e, 0x7d,
- 0xeb, 0xc1, 0x5d, 0xfa, 0x85, 0x07, 0x75, 0xe9, 0x6f, 0xc0, 0x58, 0x55, 0xb3, 0x0e, 0x88, 0x67,
- 0xf2, 0x5c, 0xdb, 0x45, 0xa2, 0x19, 0x12, 0x70, 0x19, 0xea, 0xbc, 0x49, 0x04, 0x27, 0xa9, 0xa2,
- 0x8f, 0xc3, 0x30, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xa4, 0x7b, 0x2c, 0x7f, 0xbd, 0xe8, 0x4d, 0x70,
- 0x99, 0xbb, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0xbf, 0xb4, 0x00, 0x2d, 0x36, 0x37, 0x49, 0x83, 0x04,
- 0x4e, 0x3d, 0x56, 0xf2, 0x7d, 0xc1, 0x82, 0x29, 0x92, 0x2a, 0x9e, 0xf7, 0x1b, 0x0d, 0xf1, 0xa4,
- 0xcf, 0x91, 0x3a, 0x2d, 0xe6, 0xd4, 0x89, 0xd9, 0xfa, 0x3c, 0x0c, 0x9c, 0xdb, 0x1e, 0x5a, 0x81,
- 0x49, 0x7e, 0x4b, 0x2a, 0x80, 0x66, 0x6b, 0xf7, 0x90, 0x20, 0x3c, 0xb9, 0x96, 0x46, 0xc1, 0x59,
- 0xf5, 0xec, 0x5f, 0x1f, 0x81, 0xdc, 0x5e, 0xbc, 0xaf, 0xdd, 0x7c, 0x5f, 0xbb, 0xf9, 0xbe, 0x76,
- 0xf3, 0x7d, 0xed, 0xe6, 0xfb, 0xda, 0xcd, 0xf7, 0xb5, 0x9b, 0xef, 0x52, 0xed, 0xe6, 0xdf, 0xb6,
- 0xe0, 0xa8, 0xba, 0xbe, 0x8c, 0x07, 0xfb, 0x67, 0x60, 0x92, 0x6f, 0xb7, 0xf9, 0xba, 0xe3, 0x36,
- 0xd6, 0x48, 0xa3, 0x59, 0x77, 0x22, 0x69, 0xc3, 0x74, 0x21, 0x73, 0xe5, 0x26, 0x1c, 0x25, 0x8c,
- 0x8a, 0xdc, 0xe3, 0x2c, 0x03, 0x80, 0xb3, 0x9a, 0xb1, 0x7f, 0x65, 0x00, 0x7a, 0x17, 0xb7, 0x89,
- 0x17, 0x1d, 0xc2, 0xd3, 0xa6, 0x0a, 0xa3, 0xae, 0xb7, 0xed, 0xd7, 0xb7, 0x49, 0x8d, 0xc3, 0x0f,
- 0xf2, 0x02, 0x3f, 0x26, 0x48, 0x8f, 0x2e, 0x1b, 0x24, 0x70, 0x82, 0xe4, 0x83, 0xd0, 0x11, 0x5d,
- 0x82, 0x3e, 0x7e, 0xf9, 0x08, 0x05, 0x51, 0xe6, 0x99, 0xcd, 0x06, 0x51, 0x5c, 0xa9, 0xb1, 0xfe,
- 0x8a, 0x5f, 0x6e, 0xa2, 0x3a, 0xfa, 0x34, 0x8c, 0xae, 0xbb, 0x41, 0x18, 0xad, 0xb9, 0x0d, 0x7a,
- 0x35, 0x34, 0x9a, 0xf7, 0xa0, 0x13, 0x52, 0xe3, 0xb0, 0x64, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x03,
- 0x46, 0xea, 0x8e, 0xde, 0x54, 0xff, 0x81, 0x9b, 0x52, 0xb7, 0xc3, 0x55, 0x9d, 0x10, 0x36, 0xe9,
- 0xd2, 0xed, 0x54, 0x65, 0x6a, 0x8d, 0x01, 0x26, 0xce, 0x50, 0xdb, 0x89, 0xeb, 0x33, 0x38, 0x8c,
- 0x32, 0x68, 0xcc, 0xdd, 0x60, 0xd0, 0x64, 0xd0, 0x34, 0xa7, 0x82, 0x4f, 0xc1, 0x20, 0xa1, 0x43,
- 0x48, 0x09, 0x8b, 0x0b, 0xe6, 0x7c, 0x77, 0x7d, 0x5d, 0x71, 0xab, 0x81, 0x6f, 0x6a, 0xe3, 0x16,
- 0x25, 0x25, 0x1c, 0x13, 0x45, 0xf3, 0xd0, 0x17, 0x92, 0xc0, 0x55, 0x12, 0xff, 0x36, 0xd3, 0xc8,
- 0xd0, 0xb8, 0x4b, 0x23, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x1c, 0x26, 0x8a, 0x65, 0x97, 0x81,
- 0xb6, 0xbc, 0x66, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x3a, 0xf4, 0x07, 0xa4, 0xce, 0xd4, 0xbd, 0x23,
- 0xdd, 0x2f, 0x72, 0xae, 0x3d, 0xe6, 0xf5, 0xb0, 0x24, 0x80, 0xae, 0x00, 0x0a, 0x08, 0x65, 0xf0,
- 0x5c, 0x6f, 0x43, 0x19, 0xe1, 0x8b, 0x83, 0x56, 0x31, 0xd2, 0x38, 0xc6, 0x90, 0xde, 0xac, 0x38,
- 0xa3, 0x1a, 0xba, 0x04, 0x13, 0xaa, 0x74, 0xd9, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x63, 0x8c, 0x96,
- 0x92, 0xaf, 0xe0, 0x24, 0x02, 0x4e, 0xd7, 0xb1, 0x7f, 0xde, 0x02, 0x3e, 0xce, 0x87, 0x20, 0x55,
- 0x78, 0xd5, 0x94, 0x2a, 0x9c, 0xc8, 0x9d, 0xb9, 0x1c, 0x89, 0xc2, 0xcf, 0x5b, 0x30, 0xa4, 0xcd,
- 0x6c, 0xbc, 0x66, 0xad, 0x36, 0x6b, 0xb6, 0x05, 0xe3, 0x74, 0xa5, 0x5f, 0xbb, 0x15, 0x92, 0x60,
- 0x9b, 0xd4, 0xd8, 0xc2, 0x2c, 0xdc, 0xdb, 0xc2, 0x54, 0x06, 0xbf, 0x57, 0x13, 0x04, 0x71, 0xaa,
- 0x09, 0xfb, 0x53, 0xb2, 0xab, 0xca, 0x3e, 0xba, 0xaa, 0xe6, 0x3c, 0x61, 0x1f, 0xad, 0x66, 0x15,
- 0xc7, 0x38, 0x74, 0xab, 0x6d, 0xfa, 0x61, 0x94, 0xb4, 0x8f, 0xbe, 0xec, 0x87, 0x11, 0x66, 0x10,
- 0xfb, 0x39, 0x80, 0xc5, 0x3b, 0xa4, 0xca, 0x57, 0xac, 0xfe, 0xe8, 0xb1, 0xf2, 0x1f, 0x3d, 0xf6,
- 0x1f, 0x58, 0x30, 0xba, 0x34, 0x6f, 0xdc, 0x5c, 0x33, 0x00, 0xfc, 0xa5, 0x76, 0xf3, 0xe6, 0xaa,
- 0x34, 0xd2, 0xe1, 0x76, 0x0a, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x04, 0x14, 0xeb, 0x2d, 0x4f, 0x88,
- 0x3d, 0xfb, 0xe9, 0xf5, 0x78, 0xb5, 0xe5, 0x61, 0x5a, 0xa6, 0x79, 0xb2, 0x15, 0xbb, 0xf6, 0x64,
- 0xeb, 0x18, 0x50, 0x07, 0x95, 0xa0, 0xf7, 0xf6, 0x6d, 0xb7, 0xc6, 0xe3, 0x04, 0x08, 0x03, 0xa2,
- 0x9b, 0x37, 0x97, 0x17, 0x42, 0xcc, 0xcb, 0xed, 0x2f, 0x16, 0x61, 0x7a, 0xa9, 0x4e, 0xee, 0xbc,
- 0xc3, 0x58, 0x09, 0xdd, 0xfa, 0xe1, 0x1d, 0x4c, 0x80, 0x74, 0x50, 0x5f, 0xcb, 0xce, 0xe3, 0xb1,
- 0x0e, 0xfd, 0xdc, 0x3c, 0x58, 0x46, 0x4e, 0xc8, 0x54, 0xca, 0xe6, 0x0f, 0xc8, 0x0c, 0x37, 0x33,
- 0x16, 0x4a, 0x59, 0x75, 0x61, 0x8a, 0x52, 0x2c, 0x89, 0x4f, 0xbf, 0x04, 0xc3, 0x3a, 0xe6, 0x81,
- 0xbc, 0x9e, 0xbf, 0xb7, 0x08, 0xe3, 0xb4, 0x07, 0x0f, 0x74, 0x22, 0xae, 0xa7, 0x27, 0xe2, 0x7e,
- 0x7b, 0xbe, 0x76, 0x9e, 0x8d, 0xb7, 0x92, 0xb3, 0x71, 0x21, 0x6f, 0x36, 0x0e, 0x7b, 0x0e, 0xbe,
- 0xcf, 0x82, 0xc9, 0xa5, 0xba, 0x5f, 0xdd, 0x4a, 0x78, 0xa7, 0xbe, 0x00, 0x43, 0xf4, 0x38, 0x0e,
- 0x8d, 0x40, 0x2d, 0x46, 0xe8, 0x1e, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xfa, 0xf5, 0xe5, 0x85,
- 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xf7, 0x2c, 0x38, 0x75, 0x69, 0x7e, 0x31, 0x5e,
- 0x8a, 0xa9, 0xa0, 0x43, 0x67, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, 0x2f,
- 0x04, 0xf4, 0xdd, 0x12, 0xdf, 0xeb, 0x3a, 0xc0, 0x25, 0x5c, 0x9e, 0x17, 0xe7, 0xae, 0xd4, 0x02,
- 0x59, 0xb9, 0x5a, 0xa0, 0xc7, 0xa0, 0x9f, 0xde, 0x0b, 0x6e, 0x55, 0xf6, 0x9b, 0x9b, 0x5d, 0xf0,
- 0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x59, 0x30, 0x79, 0xc9, 0x8d, 0xe8, 0xa5, 0x9d, 0x8c, 0xaa, 0x43,
- 0x6f, 0xed, 0xd0, 0x8d, 0xfc, 0x60, 0x27, 0x19, 0x55, 0x07, 0x2b, 0x08, 0xd6, 0xb0, 0xf8, 0x07,
- 0x6d, 0xbb, 0xcc, 0xdf, 0xa5, 0x60, 0xea, 0xdd, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x8e, 0x57, 0xcd,
- 0x0d, 0x98, 0xc8, 0x72, 0x47, 0x1c, 0xdc, 0x6a, 0xbc, 0x16, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x9f,
- 0x5b, 0x50, 0xba, 0xc4, 0xbd, 0x76, 0xd7, 0xc3, 0x9c, 0x43, 0xf7, 0x39, 0x18, 0x24, 0x52, 0x41,
- 0x20, 0x7a, 0xad, 0x18, 0x51, 0xa5, 0x39, 0xe0, 0xc1, 0x7d, 0x14, 0x5e, 0x17, 0x2e, 0xf4, 0x07,
- 0xf3, 0x81, 0x5e, 0x02, 0x44, 0xf4, 0xb6, 0xf4, 0x68, 0x47, 0x2c, 0x6c, 0xca, 0x62, 0x0a, 0x8a,
- 0x33, 0x6a, 0xd8, 0x3f, 0x61, 0xc1, 0x51, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0xbf, 0x5a, 0x80,
- 0x91, 0xcb, 0x6b, 0x6b, 0xe5, 0x4b, 0x24, 0xd2, 0x56, 0x65, 0x7b, 0xb5, 0x3f, 0xd6, 0xb4, 0x97,
- 0xed, 0xde, 0x88, 0xad, 0xc8, 0xad, 0xcf, 0xf0, 0x18, 0x7e, 0x33, 0xcb, 0x5e, 0x74, 0x2d, 0xa8,
- 0x44, 0x81, 0xeb, 0x6d, 0x64, 0xae, 0x74, 0xc9, 0xb3, 0x14, 0xf3, 0x78, 0x16, 0xf4, 0x1c, 0xf4,
- 0xb1, 0x20, 0x82, 0x72, 0x12, 0x1e, 0x52, 0x4f, 0x2c, 0x56, 0xba, 0xbf, 0x5b, 0x1a, 0xbc, 0x8e,
- 0x97, 0xf9, 0x1f, 0x2c, 0x50, 0xd1, 0x75, 0x18, 0xda, 0x8c, 0xa2, 0xe6, 0x65, 0xe2, 0xd4, 0x48,
- 0x20, 0x4f, 0xd9, 0xd3, 0x59, 0xa7, 0x2c, 0x1d, 0x04, 0x8e, 0x16, 0x1f, 0x4c, 0x71, 0x59, 0x88,
- 0x75, 0x3a, 0x76, 0x05, 0x20, 0x86, 0xdd, 0x27, 0xc5, 0x8d, 0xbd, 0x06, 0x83, 0xf4, 0x73, 0x67,
- 0xeb, 0xae, 0xd3, 0x5e, 0x35, 0xfe, 0x14, 0x0c, 0x4a, 0xc5, 0x77, 0x28, 0x42, 0x7c, 0xb0, 0x1b,
- 0x49, 0xea, 0xc5, 0x43, 0x1c, 0xc3, 0xed, 0x47, 0x41, 0x58, 0x00, 0xb7, 0x23, 0x69, 0xaf, 0xc3,
- 0x11, 0x66, 0xca, 0xec, 0x44, 0x9b, 0xc6, 0x1a, 0xed, 0xbc, 0x18, 0x9e, 0x16, 0xef, 0x3a, 0xfe,
- 0x65, 0x53, 0x9a, 0x0b, 0xf9, 0xb0, 0xa4, 0x18, 0xbf, 0xf1, 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x96,
- 0x2b, 0xf9, 0xb1, 0xa9, 0x2e, 0xc2, 0x30, 0x67, 0x17, 0xe9, 0xd2, 0x70, 0xea, 0xa2, 0x5d, 0x25,
- 0x01, 0x5d, 0xd3, 0x60, 0xd8, 0xc0, 0x44, 0xa7, 0xa0, 0xe8, 0xbe, 0xed, 0x25, 0x1d, 0x2c, 0x97,
- 0xdf, 0x58, 0xc5, 0xb4, 0x9c, 0x82, 0x29, 0xe7, 0xc9, 0x8f, 0x74, 0x05, 0x56, 0xdc, 0xe7, 0xab,
- 0x30, 0xea, 0x86, 0xd5, 0xd0, 0x5d, 0xf6, 0xe8, 0x3e, 0xd5, 0x76, 0xba, 0x92, 0x39, 0xd0, 0x4e,
- 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0xf7, 0x4b, 0x6f, 0xd7, 0xdc, 0x6b, 0xc7, 0xc8, 0x18, 0xf4, 0xf8,
- 0x6f, 0xb2, 0xaf, 0x0b, 0x99, 0x08, 0x5e, 0x1c, 0xff, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0x41,
- 0x57, 0xdd, 0x74, 0x9a, 0xb3, 0xad, 0x68, 0x73, 0xc1, 0x0d, 0xab, 0xfe, 0x36, 0x09, 0x76, 0xd8,
- 0x5b, 0x7c, 0x20, 0x7e, 0xd0, 0x29, 0xc0, 0xfc, 0xe5, 0xd9, 0x32, 0xc5, 0xc4, 0xe9, 0x3a, 0x68,
- 0x16, 0xc6, 0x64, 0x61, 0x85, 0x84, 0xec, 0x0a, 0x18, 0x62, 0x64, 0x94, 0xcb, 0xa3, 0x28, 0x56,
- 0x44, 0x92, 0xf8, 0x26, 0x83, 0x0b, 0xf7, 0x83, 0xc1, 0x7d, 0x11, 0x46, 0x5c, 0xcf, 0x8d, 0x5c,
- 0x27, 0xf2, 0xb9, 0xfe, 0x88, 0x3f, 0xbb, 0x99, 0x80, 0x79, 0x59, 0x07, 0x60, 0x13, 0xcf, 0xfe,
- 0x6f, 0x3d, 0x30, 0xc1, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0x5e, 0x5a, 0x61, 0xd7, 0xd3, 0x2b, 0xec,
- 0x7e, 0x70, 0xee, 0xf7, 0xbc, 0xcc, 0x3e, 0x67, 0xc1, 0x04, 0x93, 0x71, 0x1b, 0xcb, 0xec, 0x3c,
- 0x0c, 0x06, 0x86, 0x37, 0xea, 0xa0, 0xae, 0xd4, 0x92, 0x8e, 0xa5, 0x31, 0x0e, 0x7a, 0x0d, 0xa0,
- 0x19, 0xcb, 0xd0, 0x0b, 0x46, 0x08, 0x51, 0xc8, 0x15, 0x9f, 0x6b, 0x75, 0xec, 0x4f, 0xc3, 0xa0,
- 0x72, 0x37, 0x95, 0xfe, 0xe6, 0x56, 0x8e, 0xbf, 0x79, 0x67, 0x36, 0x42, 0xda, 0xc6, 0x15, 0x33,
- 0x6d, 0xe3, 0xbe, 0x6c, 0x41, 0xac, 0xe1, 0x40, 0x6f, 0xc0, 0x60, 0xd3, 0x67, 0x06, 0xd1, 0x81,
- 0xf4, 0x32, 0x78, 0xb4, 0xad, 0x8a, 0x84, 0xc7, 0x09, 0x0c, 0xf8, 0x74, 0x94, 0x65, 0x55, 0x1c,
- 0x53, 0x41, 0x57, 0xa0, 0xbf, 0x19, 0x90, 0x4a, 0xc4, 0x82, 0x58, 0x75, 0x4f, 0x90, 0x2f, 0x5f,
- 0x5e, 0x11, 0x4b, 0x0a, 0xf6, 0x2f, 0x16, 0x60, 0x3c, 0x89, 0x8a, 0x5e, 0x81, 0x1e, 0x72, 0x87,
- 0x54, 0x45, 0x7f, 0x33, 0x79, 0x82, 0x58, 0x46, 0xc2, 0x07, 0x80, 0xfe, 0xc7, 0xac, 0x16, 0xba,
- 0x0c, 0xfd, 0x94, 0x21, 0xb8, 0xa4, 0x02, 0x36, 0x3e, 0x9c, 0xc7, 0x54, 0x28, 0xce, 0x8a, 0x77,
- 0x4e, 0x14, 0x61, 0x59, 0x9d, 0x19, 0xa4, 0x55, 0x9b, 0x15, 0xfa, 0xd6, 0x8a, 0xda, 0x89, 0x04,
- 0xd6, 0xe6, 0xcb, 0x1c, 0x49, 0x50, 0xe3, 0x06, 0x69, 0xb2, 0x10, 0xc7, 0x44, 0xd0, 0x6b, 0xd0,
- 0x1b, 0xd6, 0x09, 0x69, 0x0a, 0x8b, 0x83, 0x4c, 0x29, 0x67, 0x85, 0x22, 0x08, 0x4a, 0x4c, 0x2a,
- 0xc2, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x2d, 0xf8, 0x1c, 0x6f, 0x83, 0x1c, 0x82,
- 0x62, 0x60, 0x01, 0x7a, 0xc2, 0x26, 0xa9, 0xb6, 0xb3, 0xf6, 0x8f, 0xfb, 0x53, 0x69, 0x92, 0x6a,
- 0xbc, 0x66, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0xdf, 0x0f, 0x30, 0x1a, 0xa3, 0x2d, 0x47, 0xa4, 0x81,
- 0x9e, 0x31, 0xa2, 0xdc, 0x9c, 0x48, 0x44, 0xb9, 0x19, 0x64, 0xd8, 0x9a, 0x0c, 0xfa, 0xd3, 0x50,
- 0x6c, 0x38, 0x77, 0x84, 0x90, 0xf1, 0xa9, 0xf6, 0xdd, 0xa0, 0xf4, 0x67, 0x56, 0x9c, 0x3b, 0xfc,
- 0x1d, 0xfe, 0x94, 0xdc, 0x63, 0x2b, 0xce, 0x9d, 0x8e, 0x16, 0xe9, 0xb4, 0x11, 0xd6, 0x96, 0xeb,
- 0x09, 0xe3, 0xb4, 0xae, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0xd7, 0x45, 0x5b, 0xae, 0x87, 0xee,
- 0x42, 0xbf, 0xb0, 0x1d, 0x15, 0xe1, 0xf7, 0xce, 0x77, 0xd1, 0x9e, 0x30, 0x3d, 0xe5, 0x6d, 0x9e,
- 0x97, 0x72, 0x06, 0x51, 0xda, 0xb1, 0x5d, 0xd9, 0x20, 0xfa, 0x3b, 0x16, 0x8c, 0x8a, 0xdf, 0x98,
- 0xbc, 0xdd, 0x22, 0x61, 0x24, 0xf8, 0xf0, 0x0f, 0x75, 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x43,
- 0xf2, 0xca, 0x34, 0x81, 0x1d, 0x7b, 0x94, 0xe8, 0x05, 0xfa, 0x45, 0x0b, 0x8e, 0x34, 0x9c, 0x3b,
- 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x0d, 0xc6, 0x2b, 0xdd, 0x4d, 0x7f, 0xaa, 0x3a,
- 0xef, 0xa4, 0x54, 0xb8, 0x1e, 0xc9, 0x42, 0xe9, 0xd8, 0xd5, 0xcc, 0x7e, 0x4d, 0xaf, 0xc3, 0x80,
- 0x5c, 0x6f, 0x0f, 0xd2, 0x30, 0x9e, 0xb5, 0x23, 0xd6, 0xda, 0x03, 0x6d, 0xe7, 0xd3, 0x30, 0xac,
- 0xaf, 0xb1, 0x07, 0xda, 0xd6, 0xdb, 0x30, 0x99, 0xb1, 0x96, 0x1e, 0x68, 0x93, 0xb7, 0xe1, 0x44,
- 0xee, 0xfa, 0x78, 0xa0, 0x8e, 0x0d, 0x5f, 0xb5, 0xf4, 0x73, 0xf0, 0x10, 0xb4, 0x33, 0xf3, 0xa6,
- 0x76, 0xe6, 0x74, 0xfb, 0x9d, 0x93, 0xa3, 0xa2, 0x79, 0x4b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0xeb,
- 0xd0, 0x57, 0xa7, 0x25, 0xd2, 0x02, 0xd9, 0xee, 0xbc, 0x23, 0x63, 0xbe, 0x98, 0x95, 0x87, 0x58,
- 0x50, 0xb0, 0xbf, 0x64, 0x41, 0x86, 0x6b, 0x06, 0xe5, 0x93, 0x5a, 0x6e, 0x8d, 0x0d, 0x49, 0x31,
- 0xe6, 0x93, 0x54, 0x10, 0x98, 0x53, 0x50, 0xdc, 0x70, 0x6b, 0xc2, 0xb3, 0x58, 0x81, 0x2f, 0x51,
- 0xf0, 0x86, 0x5b, 0x43, 0x4b, 0x80, 0xc2, 0x56, 0xb3, 0x59, 0x67, 0x66, 0x4b, 0x4e, 0xfd, 0x52,
- 0xe0, 0xb7, 0x9a, 0xdc, 0xdc, 0xb8, 0xc8, 0x85, 0x44, 0x95, 0x14, 0x14, 0x67, 0xd4, 0xb0, 0x7f,
- 0xd5, 0x82, 0x9e, 0x43, 0x98, 0x26, 0x6c, 0x4e, 0xd3, 0x33, 0xb9, 0xa4, 0x45, 0xd6, 0x86, 0x19,
- 0xec, 0xdc, 0x5e, 0xbc, 0x13, 0x11, 0x2f, 0x64, 0x0c, 0x47, 0xe6, 0xac, 0xed, 0x5a, 0x30, 0x79,
- 0xd5, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x24, 0x58, 0xf6, 0x36, 0x0e, 0x64, 0xdb, 0x5f,
- 0xe8, 0x68, 0xdb, 0x7f, 0x11, 0xfa, 0xdc, 0xa6, 0x16, 0xf6, 0xfd, 0x0c, 0x9d, 0xdd, 0xe5, 0xb2,
- 0x88, 0xf8, 0x8e, 0x8c, 0xc6, 0x59, 0x29, 0x16, 0xf8, 0x74, 0x59, 0x72, 0xa3, 0xba, 0x9e, 0xfc,
- 0x65, 0x49, 0xdf, 0x3a, 0xc9, 0x70, 0x66, 0x86, 0xf9, 0xf7, 0x26, 0x18, 0x4d, 0x08, 0x0f, 0x46,
- 0x0c, 0xfd, 0x2e, 0xff, 0x52, 0xb1, 0x36, 0x1f, 0xcf, 0x7e, 0x83, 0xa4, 0x06, 0x46, 0xf3, 0xcd,
- 0xe3, 0x05, 0x58, 0x12, 0xb2, 0x2f, 0x42, 0x66, 0xf8, 0x99, 0xce, 0xf2, 0x25, 0xfb, 0x63, 0x30,
- 0xc1, 0x6a, 0x1e, 0x50, 0x76, 0x63, 0x27, 0xa4, 0xe2, 0x19, 0x11, 0x7c, 0xed, 0xff, 0x6c, 0x01,
- 0x5a, 0xf1, 0x6b, 0xee, 0xfa, 0x8e, 0x20, 0xce, 0xbf, 0xff, 0x6d, 0x28, 0xf1, 0xc7, 0x71, 0x32,
- 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xb8, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, 0x1d,
- 0x77, 0xa2, 0x87, 0xde, 0x48, 0x04, 0x1d, 0xfc, 0x70, 0x2a, 0xe8, 0xe0, 0xe3, 0x99, 0x76, 0x31,
- 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0xe7, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcb, 0x8c,
- 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0xbe, 0x4b, 0x62, 0xff, 0xc6, 0x82, 0x38,
- 0xdc, 0xd5, 0x21, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, 0xb8,
- 0xd1, 0x15, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, 0xb3,
- 0xf1, 0xf5, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0xfb, 0x13, 0xa8, 0x72, 0x1b,
- 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x98, 0x0d, 0xcd, 0xb4,
- 0xf4, 0x5c, 0xbd, 0x9a, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, 0xea,
- 0xeb, 0xe0, 0x82, 0xfd, 0x15, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4b, 0x5c, 0x40, 0x54, 0x7f, 0x72,
- 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x07, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x5d, 0xa2,
- 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0xdd, 0xdf, 0x2d, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, 0x6b,
- 0x5c, 0xc5, 0xfe, 0x69, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x80, 0xde, 0xe6, 0xa6, 0x13, 0x92,
- 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0xee, 0xef, 0x96, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, 0x76,
- 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0xbf, 0xb4, 0xa0, 0x67, 0x95, 0xde, 0x5e,
- 0x0f, 0xfe, 0x08, 0x78, 0xd5, 0x38, 0x02, 0x4e, 0xe6, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, 0x89,
- 0xdd, 0x7f, 0x3a, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, 0x3e,
- 0x67, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x02, 0xfa, 0x85, 0x9f,
- 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x64, 0x11, 0x8c, 0xfc, 0x48, 0xe8, 0xd7,
- 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x46, 0xa5, 0xba, 0x49, 0x6a,
- 0xad, 0xba, 0xeb, 0x6d, 0x2c, 0x6f, 0x78, 0xbe, 0x2a, 0x5e, 0xbc, 0x43, 0xaa, 0x2d, 0xa6, 0x1b,
- 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbb, 0xb7, 0x5b, 0x9a, 0xc1, 0x07, 0xa2, 0x8d, 0x0f,
- 0xd8, 0x17, 0xf4, 0x7b, 0x16, 0x9c, 0xe7, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, 0x59,
- 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xa2, 0x18, 0xd0, 0xf3, 0xe5, 0x83, 0xb5, 0x85,
- 0x0f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x44, 0x84, 0x89, 0x14, 0x77, 0xc0, 0x0b, 0xc6, 0x92,
- 0x78, 0x38, 0xb1, 0x24, 0x26, 0x0c, 0xe4, 0xfb, 0x73, 0xfc, 0x87, 0x30, 0x41, 0x0f, 0xe7, 0xcb,
- 0xc4, 0x09, 0xa2, 0x5b, 0xc4, 0xe1, 0x56, 0x89, 0xc5, 0x03, 0x9f, 0xfe, 0x4a, 0x3c, 0x7e, 0x35,
- 0x49, 0x0c, 0xa7, 0xe9, 0xbf, 0x97, 0xee, 0x1c, 0x0f, 0xc6, 0x53, 0x91, 0x3e, 0xdf, 0x84, 0x41,
- 0xe5, 0x24, 0x25, 0x0e, 0x9d, 0xf6, 0x01, 0x73, 0x93, 0x14, 0xb8, 0xd0, 0x33, 0x76, 0xd0, 0x8b,
- 0xc9, 0xd9, 0xff, 0xa4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15, 0x06, 0x9c, 0x90, 0x05, 0xf1, 0xae,
- 0xb5, 0x93, 0x4b, 0xa7, 0x9a, 0x61, 0x8e, 0x6a, 0xb3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x65, 0x6e,
- 0xfb, 0xb9, 0x4d, 0xda, 0x09, 0xa5, 0x53, 0xd4, 0x40, 0x5a, 0x87, 0x6e, 0x13, 0x2c, 0xea, 0xa3,
- 0x4f, 0x70, 0xe3, 0xdc, 0x2b, 0x9e, 0x7f, 0xdb, 0xbb, 0xe4, 0xfb, 0x32, 0x24, 0x50, 0x77, 0x04,
- 0x27, 0xa4, 0x49, 0xae, 0xaa, 0x8e, 0x4d, 0x6a, 0xdd, 0x85, 0xce, 0xfe, 0x0c, 0xb0, 0xbc, 0x24,
- 0x66, 0x4c, 0x82, 0x10, 0x11, 0x18, 0x13, 0x31, 0x48, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xe7, 0xb7,
- 0x59, 0x3b, 0xd6, 0xe3, 0x5c, 0x31, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x93, 0x1f, 0xc2, 0x4b, 0xc4,
- 0x89, 0x5a, 0x01, 0x09, 0xd1, 0x47, 0x61, 0x2a, 0xfd, 0x32, 0x16, 0xea, 0x10, 0x8b, 0x71, 0xcf,
- 0x27, 0xf7, 0x76, 0x4b, 0x53, 0x95, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xfe, 0x59, 0x0b, 0x98, 0x27,
- 0xf8, 0x21, 0x70, 0x3e, 0x1f, 0x31, 0x39, 0x9f, 0xa9, 0xbc, 0xe9, 0xcc, 0x61, 0x7a, 0x9e, 0xe7,
- 0x6b, 0xb8, 0x1c, 0xf8, 0x77, 0x76, 0x84, 0xed, 0x56, 0xe7, 0x67, 0x9c, 0xfd, 0x45, 0x0b, 0x58,
- 0x12, 0x1f, 0xcc, 0x5f, 0xed, 0x52, 0xc1, 0xd1, 0xd9, 0x2c, 0xe1, 0xa3, 0x30, 0xb0, 0x2e, 0x86,
- 0x3f, 0x43, 0xe8, 0x64, 0x74, 0xd8, 0xa4, 0x2d, 0x27, 0x4d, 0x78, 0x74, 0x8a, 0x7f, 0x58, 0x51,
- 0xb3, 0xff, 0xb1, 0x05, 0xd3, 0xf9, 0xd5, 0xd0, 0x75, 0x38, 0x1e, 0x90, 0x6a, 0x2b, 0x08, 0xe9,
- 0x96, 0x10, 0x0f, 0x20, 0xe1, 0x14, 0xc5, 0xa7, 0xfa, 0xa1, 0xbd, 0xdd, 0xd2, 0x71, 0x9c, 0x8d,
- 0x82, 0xf3, 0xea, 0xa2, 0x97, 0x60, 0xb4, 0x15, 0x72, 0xce, 0x8f, 0x31, 0x5d, 0xa1, 0x88, 0x14,
- 0xcd, 0xfc, 0x86, 0xae, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0xbf, 0x8b, 0x2f, 0x47, 0x15, 0x2c, 0xba,
- 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x68, 0xa7, 0x5b, 0x9f, 0x5d, 0x97,
- 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, 0x82, 0xe3, 0x3a, 0xa2, 0xe6, 0x0e,
- 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0x3b, 0x27,
- 0x57, 0xe8, 0x35, 0x51, 0xbe, 0x2f, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, 0x86,
- 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, 0xfe,
- 0x33, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x6d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x73, 0xf1, 0x4e,
- 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0x4f, 0x75, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, 0x95,
- 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc1, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, 0x26,
- 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xcb, 0x45, 0x7e, 0x68, 0xb0,
- 0xb7, 0xc7, 0x13, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, 0xca,
- 0xbc, 0x18, 0x4b, 0x38, 0x3a, 0x07, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, 0x42,
- 0xac, 0xa0, 0xe8, 0x59, 0x80, 0x66, 0xe0, 0x6f, 0xbb, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, 0xe7,
- 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x65, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, 0xf7,
- 0xca, 0x6e, 0xec, 0xba, 0x0e, 0xc4, 0x26, 0x2e, 0x9a, 0x85, 0xbe, 0xc8, 0x61, 0xd6, 0x66, 0xbd,
- 0xf9, 0x46, 0xf4, 0x6b, 0x14, 0x43, 0xcf, 0x2c, 0x47, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x29, 0xdd,
- 0xeb, 0xf9, 0x4d, 0x24, 0xbc, 0x57, 0xba, 0xbb, 0xb5, 0x34, 0xe7, 0x7a, 0xe1, 0x15, 0x63, 0xd0,
- 0x42, 0x2f, 0x01, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xae, 0x6c, 0x44, 0x15, 0x23, 0xb3, 0xe0,
- 0xaf, 0xfa, 0xd1, 0xf5, 0x90, 0x2c, 0x2a, 0x0c, 0xac, 0x61, 0xdb, 0xdf, 0x3f, 0x04, 0x10, 0x3f,
- 0x34, 0xd0, 0x5d, 0x18, 0xa8, 0x3a, 0x4d, 0xa7, 0xca, 0xd3, 0xa6, 0x16, 0xf3, 0xbc, 0x9e, 0xe3,
- 0x1a, 0x33, 0xf3, 0x02, 0x9d, 0x2b, 0x6f, 0x64, 0xc8, 0xf0, 0x01, 0x59, 0xdc, 0x51, 0x61, 0xa3,
- 0xda, 0x43, 0x9f, 0xb3, 0x60, 0x48, 0x44, 0x3a, 0x62, 0x33, 0x54, 0xc8, 0xd7, 0xb7, 0x69, 0xed,
- 0xcf, 0xc6, 0x35, 0x78, 0x17, 0x9e, 0x93, 0x2b, 0x54, 0x83, 0x74, 0xec, 0x85, 0xde, 0x30, 0xfa,
- 0xa0, 0x7c, 0xdb, 0x16, 0x8d, 0xa1, 0x54, 0x6f, 0xdb, 0x41, 0x76, 0xd5, 0xe8, 0xcf, 0xda, 0xeb,
- 0xc6, 0xb3, 0xb6, 0x27, 0xdf, 0x7f, 0xd8, 0xe0, 0xb7, 0x3b, 0xbd, 0x68, 0x51, 0x59, 0x8f, 0x25,
- 0xd2, 0x9b, 0xef, 0xf4, 0xaa, 0x3d, 0xec, 0x3a, 0xc4, 0x11, 0xf9, 0x34, 0x8c, 0xd5, 0x4c, 0xae,
- 0x45, 0xac, 0xc4, 0xc7, 0xf3, 0xe8, 0x26, 0x98, 0x9c, 0x98, 0x4f, 0x49, 0x00, 0x70, 0x92, 0x30,
- 0x2a, 0xf3, 0xd0, 0x32, 0xcb, 0xde, 0xba, 0x2f, 0x3c, 0xa8, 0xec, 0xdc, 0xb9, 0xdc, 0x09, 0x23,
- 0xd2, 0xa0, 0x98, 0x31, 0x93, 0xb0, 0x2a, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x7d, 0xcc, 0xeb,
- 0x31, 0x9c, 0x1a, 0xc8, 0x57, 0x6b, 0x98, 0x91, 0x50, 0xe3, 0x0d, 0xc9, 0xfe, 0x86, 0x58, 0x50,
- 0x40, 0x97, 0xa5, 0x4f, 0x71, 0xb8, 0xec, 0x5d, 0x0f, 0x09, 0xf3, 0x29, 0x1e, 0x9c, 0x7b, 0x34,
- 0x76, 0x17, 0xe6, 0xe5, 0x99, 0xf9, 0x67, 0x8d, 0x9a, 0x94, 0xed, 0x13, 0xff, 0x65, 0x5a, 0x5b,
- 0x11, 0xb7, 0x2d, 0xb3, 0x7b, 0x66, 0xea, 0xdb, 0x78, 0x38, 0x6f, 0x98, 0x24, 0x70, 0x92, 0x26,
- 0x65, 0xa1, 0xf9, 0xae, 0x17, 0x3e, 0x58, 0x9d, 0xce, 0x0e, 0x2e, 0x39, 0x60, 0xb7, 0x11, 0x2f,
- 0xc1, 0xa2, 0x3e, 0x72, 0x61, 0x2c, 0x30, 0xd8, 0x0b, 0x19, 0x6e, 0xed, 0x6c, 0x77, 0x4c, 0x8c,
- 0x16, 0xc8, 0xdf, 0x24, 0x83, 0x93, 0x74, 0xd1, 0xeb, 0x1a, 0xa3, 0x34, 0xd2, 0xfe, 0xe5, 0xdf,
- 0x89, 0x35, 0x9a, 0xde, 0x82, 0x11, 0xe3, 0xb0, 0x79, 0xa0, 0x2a, 0x48, 0x0f, 0xc6, 0x93, 0x27,
- 0xcb, 0x03, 0xd5, 0x3c, 0xfe, 0x49, 0x0f, 0x8c, 0x9a, 0x3b, 0x01, 0x9d, 0x87, 0x41, 0x41, 0x44,
- 0x65, 0xb4, 0x52, 0x9b, 0x7b, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, 0x99, 0xb1, 0xea, 0x9a, 0xaf,
- 0x40, 0x9c, 0xc8, 0x4c, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xb0, 0xb7, 0x7c, 0x3f, 0x52, 0xf7, 0xa8,
- 0xda, 0x2e, 0x73, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x3f, 0xb7, 0x48, 0xe0, 0x91, 0xba, 0x99, 0xd2,
- 0x41, 0xdd, 0x9f, 0x57, 0x74, 0x20, 0x36, 0x71, 0x29, 0x17, 0xe0, 0x87, 0x6c, 0xff, 0x89, 0x67,
- 0x72, 0xec, 0x7b, 0x51, 0xe1, 0x51, 0x24, 0x24, 0x1c, 0x7d, 0x0c, 0x8e, 0xab, 0xf0, 0x89, 0x62,
- 0x75, 0xc9, 0x16, 0xfb, 0x0c, 0xa9, 0xd6, 0xf1, 0xf9, 0x6c, 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x0a,
- 0xa3, 0xe2, 0x29, 0x25, 0x29, 0xf6, 0x9b, 0x86, 0x84, 0x57, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xa4,
- 0x14, 0xec, 0x8d, 0x21, 0x29, 0x0c, 0xa4, 0x93, 0x52, 0xe8, 0x70, 0x9c, 0xaa, 0x81, 0x66, 0x61,
- 0x8c, 0xb3, 0x8e, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0x78, 0x76, 0xaa, 0x4d, 0x75, 0xcd, 0x04, 0xe3,
- 0x24, 0x3e, 0xba, 0x08, 0xc3, 0x4e, 0x50, 0xdd, 0x74, 0x23, 0x52, 0xa5, 0x3b, 0x83, 0xd9, 0xf2,
- 0x69, 0x96, 0x98, 0xb3, 0x1a, 0x0c, 0x1b, 0x98, 0xf6, 0x5d, 0x98, 0xcc, 0x08, 0x2f, 0x43, 0x17,
- 0x8e, 0xd3, 0x74, 0xe5, 0x37, 0x25, 0xdc, 0x1d, 0x66, 0xcb, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba,
- 0x3a, 0x59, 0x18, 0x1a, 0x2d, 0xf9, 0xb6, 0x5a, 0x9d, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xaf,
- 0x0a, 0x30, 0x96, 0xa1, 0xa0, 0x63, 0x09, 0xa0, 0x13, 0x2f, 0xad, 0x38, 0xdf, 0xb3, 0x99, 0xe3,
- 0xa4, 0x70, 0x80, 0x1c, 0x27, 0xc5, 0x4e, 0x39, 0x4e, 0x7a, 0xde, 0x49, 0x8e, 0x13, 0x73, 0xc4,
- 0x7a, 0xbb, 0x1a, 0xb1, 0x8c, 0xbc, 0x28, 0x7d, 0x07, 0xcc, 0x8b, 0x62, 0x0c, 0x7a, 0x7f, 0x17,
- 0x83, 0xfe, 0xa3, 0x05, 0x18, 0x4f, 0xea, 0xf6, 0x0e, 0x41, 0x3e, 0xfe, 0xba, 0x21, 0x1f, 0x3f,
- 0xd7, 0x8d, 0x27, 0x7e, 0xae, 0xac, 0x1c, 0x27, 0x64, 0xe5, 0x4f, 0x76, 0x45, 0xad, 0xbd, 0xdc,
- 0xfc, 0xef, 0x17, 0xe0, 0x68, 0xa6, 0xca, 0xf3, 0x10, 0xc6, 0xe6, 0x9a, 0x31, 0x36, 0xcf, 0x74,
- 0x1d, 0xa5, 0x20, 0x77, 0x80, 0x6e, 0x26, 0x06, 0xe8, 0x7c, 0xf7, 0x24, 0xdb, 0x8f, 0xd2, 0x37,
- 0x8a, 0x70, 0x3a, 0xb3, 0x5e, 0x2c, 0x5e, 0x5e, 0x32, 0xc4, 0xcb, 0xcf, 0x26, 0xc4, 0xcb, 0x76,
- 0xfb, 0xda, 0xf7, 0x47, 0xde, 0x2c, 0xbc, 0xf5, 0x59, 0xcc, 0x91, 0x7b, 0x94, 0x35, 0x1b, 0xde,
- 0xfa, 0x8a, 0x10, 0x36, 0xe9, 0xbe, 0x97, 0x64, 0xcc, 0xbf, 0x6b, 0xc1, 0x89, 0xcc, 0xb9, 0x39,
- 0x04, 0x49, 0xdf, 0xaa, 0x29, 0xe9, 0x7b, 0xa2, 0xeb, 0xd5, 0x9a, 0x23, 0xfa, 0xfb, 0x7c, 0x5f,
- 0xce, 0xb7, 0x30, 0x01, 0xc4, 0x35, 0x18, 0x72, 0xaa, 0x55, 0x12, 0x86, 0x2b, 0x7e, 0x4d, 0xa5,
- 0x43, 0x78, 0x86, 0x3d, 0x0f, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, 0x74, 0x92, 0x44, 0x0c, 0xc6, 0x3a,
- 0x05, 0xf4, 0x09, 0x18, 0x08, 0x65, 0x26, 0xcb, 0x9e, 0x7b, 0xcf, 0x64, 0xc9, 0x98, 0x5c, 0x25,
- 0x60, 0x51, 0x24, 0xd1, 0x77, 0xea, 0xd1, 0x9f, 0xda, 0x88, 0x16, 0x79, 0x27, 0xef, 0x21, 0x06,
- 0xd4, 0xb3, 0x00, 0xdb, 0xea, 0x25, 0x93, 0x14, 0x9e, 0x68, 0x6f, 0x1c, 0x0d, 0x0b, 0xbd, 0x06,
- 0xe3, 0x21, 0x0f, 0x7c, 0x1a, 0x1b, 0xa9, 0xf0, 0xb5, 0xc8, 0x62, 0xc7, 0x55, 0x12, 0x30, 0x9c,
- 0xc2, 0x46, 0x4b, 0xb2, 0x55, 0x66, 0x8e, 0xc4, 0x97, 0xe7, 0xd9, 0xb8, 0x45, 0x61, 0x92, 0x74,
- 0x24, 0x39, 0x09, 0x6c, 0xf8, 0xb5, 0x9a, 0xe8, 0x13, 0x00, 0x74, 0x11, 0x09, 0x21, 0x4a, 0x7f,
- 0xfe, 0x11, 0x4a, 0xcf, 0x96, 0x5a, 0xa6, 0x27, 0x03, 0x73, 0xb3, 0x5f, 0x50, 0x44, 0xb0, 0x46,
- 0x10, 0x39, 0x30, 0x12, 0xff, 0x8b, 0x73, 0xb4, 0x9f, 0xcb, 0x6d, 0x21, 0x49, 0x9c, 0x29, 0x18,
- 0x16, 0x74, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0e, 0x27, 0xb6, 0x73, 0x2d, 0x7f, 0x38, 0x27, 0xc8,
- 0x92, 0xae, 0xe7, 0xdb, 0xfb, 0xe4, 0xd7, 0xb7, 0xff, 0x2d, 0xc0, 0x43, 0x6d, 0x4e, 0x7a, 0x34,
- 0x6b, 0x6a, 0xed, 0x9f, 0x4a, 0x4a, 0x36, 0xa6, 0x33, 0x2b, 0x1b, 0xa2, 0x8e, 0xc4, 0x86, 0x2a,
- 0xbc, 0xe3, 0x0d, 0xf5, 0x43, 0x96, 0x26, 0x73, 0xe2, 0x36, 0xdd, 0x1f, 0x39, 0xe0, 0x0d, 0x76,
- 0x1f, 0x85, 0x50, 0xeb, 0x19, 0x92, 0x9c, 0x67, 0xbb, 0xee, 0x4e, 0xf7, 0xa2, 0x9d, 0xaf, 0x66,
- 0x07, 0x7c, 0xe7, 0x42, 0x9e, 0x4b, 0x07, 0xfd, 0xfe, 0xc3, 0x0a, 0xfe, 0xfe, 0x75, 0x0b, 0x4e,
- 0xa4, 0x8a, 0x79, 0x1f, 0x48, 0x28, 0xa2, 0xdd, 0xad, 0xbe, 0xe3, 0xce, 0x4b, 0x82, 0xfc, 0x1b,
- 0x2e, 0x8b, 0x6f, 0x38, 0x91, 0x8b, 0x97, 0xec, 0xfa, 0x17, 0xfe, 0xb8, 0x34, 0xc9, 0x1a, 0x30,
- 0x11, 0x71, 0x7e, 0xd7, 0x51, 0x13, 0xce, 0x54, 0x5b, 0x41, 0x10, 0x2f, 0xd6, 0x8c, 0xcd, 0xc9,
- 0xdf, 0x7a, 0x8f, 0xee, 0xed, 0x96, 0xce, 0xcc, 0x77, 0xc0, 0xc5, 0x1d, 0xa9, 0x21, 0x0f, 0x50,
- 0x23, 0x65, 0x5f, 0xc7, 0x0e, 0x80, 0x1c, 0x39, 0x4c, 0xda, 0x1a, 0x8f, 0x5b, 0xca, 0x66, 0x58,
- 0xe9, 0x65, 0x50, 0x3e, 0x5c, 0xe9, 0xc9, 0xb7, 0x26, 0x2e, 0xfd, 0xf4, 0x55, 0x38, 0xdd, 0x7e,
- 0x31, 0x1d, 0x28, 0x94, 0xc3, 0x1f, 0x58, 0x70, 0xaa, 0x6d, 0xbc, 0xb0, 0x6f, 0xc3, 0xc7, 0x82,
- 0xfd, 0x59, 0x0b, 0x1e, 0xce, 0xac, 0x91, 0x74, 0xc2, 0xab, 0xd2, 0x42, 0xcd, 0x1c, 0x35, 0x8e,
- 0x9c, 0x23, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x36, 0x0b, 0x1d, 0x2d, 0x36, 0x7f, 0xcb, 0x82, 0xd4,
- 0x55, 0x7f, 0x08, 0x9c, 0xe7, 0xb2, 0xc9, 0x79, 0x3e, 0xda, 0xcd, 0x68, 0xe6, 0x30, 0x9d, 0x7f,
- 0x31, 0x06, 0xc7, 0x72, 0x3c, 0xb1, 0xb7, 0x61, 0x62, 0xa3, 0x4a, 0xcc, 0xd0, 0x1b, 0xed, 0x42,
- 0xd2, 0xb5, 0x8d, 0xd3, 0x31, 0x77, 0x74, 0x6f, 0xb7, 0x34, 0x91, 0x42, 0xc1, 0xe9, 0x26, 0xd0,
- 0x67, 0x2d, 0x38, 0xe2, 0xdc, 0x0e, 0x17, 0xe9, 0x0b, 0xc2, 0xad, 0xce, 0xd5, 0xfd, 0xea, 0x16,
- 0x65, 0xcc, 0xe4, 0xb6, 0x7a, 0x3e, 0x53, 0x18, 0x7d, 0xb3, 0x92, 0xc2, 0x37, 0x9a, 0x9f, 0xda,
- 0xdb, 0x2d, 0x1d, 0xc9, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x32, 0x7e, 0x39, 0xd1, 0x66, 0xbb,
- 0xe0, 0x30, 0x59, 0x2e, 0xf3, 0x9c, 0x25, 0x96, 0x10, 0xac, 0xe8, 0xa0, 0x4f, 0xc1, 0xe0, 0x86,
- 0x8c, 0x03, 0x91, 0xc1, 0x72, 0xc7, 0x03, 0xd9, 0x3e, 0x3a, 0x06, 0x37, 0x81, 0x51, 0x48, 0x38,
- 0x26, 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0x8a, 0x10, 0x75, 0xd9, 0x96, 0xb8, 0xa6, 0xad, 0x33,
- 0x0f, 0xc1, 0xb4, 0xba, 0x54, 0xc1, 0xb4, 0x22, 0xba, 0x0c, 0xc5, 0xe0, 0x56, 0x4d, 0x68, 0x52,
- 0x32, 0x37, 0x29, 0x9e, 0x5b, 0xc8, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0x16, 0x30, 0x25, 0x81, 0xca,
- 0xd0, 0xcb, 0xdc, 0x97, 0x05, 0x6b, 0x9b, 0xf9, 0x94, 0x6f, 0x13, 0x06, 0x80, 0x7b, 0x24, 0x32,
- 0x04, 0xcc, 0x09, 0xa1, 0x35, 0xe8, 0xab, 0xba, 0x5e, 0x8d, 0x04, 0x82, 0x97, 0xfd, 0x60, 0xa6,
- 0xce, 0x84, 0x61, 0xe4, 0xd0, 0xe4, 0x2a, 0x04, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0x34, 0x37,
- 0xd7, 0xe5, 0x8d, 0x95, 0x4d, 0x95, 0x34, 0x37, 0x97, 0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68,
- 0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf,
- 0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e,
- 0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 0x96, 0x44,
- 0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88,
- 0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48,
- 0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb,
- 0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44,
- 0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf,
- 0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8,
- 0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95,
- 0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89,
- 0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16,
- 0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30,
- 0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d,
- 0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c,
- 0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca,
- 0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d,
- 0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27,
- 0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a,
- 0xc2, 0xfe, 0x8d, 0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50,
- 0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80,
- 0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28,
- 0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28,
- 0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26,
- 0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b,
- 0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6,
- 0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4,
- 0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25,
- 0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b,
- 0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5,
- 0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf,
- 0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe,
- 0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb,
- 0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c,
- 0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f,
- 0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 0x81, 0xf6, 0xaa,
- 0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37,
- 0xfb, 0x94, 0x46, 0x99, 0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb,
- 0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e,
- 0xea, 0x30, 0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45,
- 0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c,
- 0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c,
- 0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6,
- 0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f,
- 0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2,
- 0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf,
- 0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6,
- 0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76,
- 0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20,
- 0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c,
- 0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57,
- 0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7,
- 0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab,
- 0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52,
- 0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7,
- 0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0,
- 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a,
- 0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7,
- 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6,
- 0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78,
- 0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39,
- 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad,
- 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b,
- 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7,
- 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe,
- 0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16,
- 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58,
- 0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb,
- 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22,
- 0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9,
- 0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1,
- 0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1,
- 0xf5, 0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb,
- 0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e,
- 0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91,
- 0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39,
- 0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8,
- 0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c,
- 0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e,
- 0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3,
- 0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae,
- 0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01,
- 0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa,
- 0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04,
- 0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0,
- 0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68,
- 0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c,
- 0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06,
- 0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca,
- 0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47,
- 0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8,
- 0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 0x8b, 0xd0, 0x1f, 0xee, 0x84,
- 0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba,
- 0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde,
- 0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35,
- 0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15,
- 0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41,
- 0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2,
- 0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2,
- 0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7,
- 0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17,
- 0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c,
- 0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8,
- 0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4,
- 0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72,
- 0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8,
- 0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed,
- 0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25,
- 0x1f, 0xde, 0xdb, 0x2d, 0x9d, 0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f,
- 0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 0x13, 0x7b, 0xbb, 0xa5,
- 0xa3, 0xb3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x05, 0x06, 0x6b, 0x9e, 0x3c, 0x00, 0xfb, 0x8c,
- 0xe4, 0xab, 0x83, 0x0b, 0xab, 0x15, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, 0x57, 0x40, 0x1b, 0x5c, 0x5b,
- 0xa4, 0x44, 0x7c, 0xfd, 0xa9, 0x78, 0x9a, 0x49, 0x29, 0xb8, 0x11, 0x89, 0x80, 0xab, 0x49, 0x95,
- 0xa7, 0x9b, 0x11, 0xa4, 0xc0, 0x20, 0x8c, 0x5e, 0x07, 0x24, 0x92, 0xfc, 0xcc, 0x56, 0x59, 0x4e,
- 0x3a, 0xcd, 0x84, 0x53, 0xbd, 0xdc, 0x2b, 0x29, 0x0c, 0x9c, 0x51, 0x0b, 0x5d, 0xa6, 0x27, 0x97,
- 0x5e, 0x2a, 0x4e, 0x46, 0x95, 0xe2, 0x7b, 0x81, 0x34, 0x03, 0xc2, 0xac, 0x0e, 0x4d, 0x8a, 0x38,
- 0x51, 0x0f, 0xd5, 0xe0, 0xa4, 0xd3, 0x8a, 0x7c, 0xa6, 0x88, 0x33, 0x51, 0xd7, 0xfc, 0x2d, 0xe2,
- 0x31, 0x1d, 0xf8, 0x00, 0x8b, 0xd3, 0x79, 0x72, 0xb6, 0x0d, 0x1e, 0x6e, 0x4b, 0x85, 0xbe, 0x62,
- 0xe8, 0x58, 0x68, 0x3a, 0x32, 0xc3, 0xa9, 0x9a, 0x2b, 0x8e, 0x25, 0x06, 0x7a, 0x01, 0x86, 0x36,
- 0xfd, 0x30, 0x5a, 0x25, 0xd1, 0x6d, 0x3f, 0xd8, 0x12, 0x59, 0x05, 0xe2, 0x4c, 0x2e, 0x31, 0x08,
- 0xeb, 0x78, 0xe8, 0x09, 0xe8, 0x67, 0x16, 0x5a, 0xcb, 0x0b, 0xec, 0x1a, 0x1c, 0x88, 0xcf, 0x98,
- 0xcb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0x2e, 0xcf, 0x33, 0x43, 0x97, 0x04, 0xea, 0x72, 0x79,
- 0x1e, 0x4b, 0x38, 0x5d, 0xae, 0xe1, 0xa6, 0x13, 0x90, 0x72, 0xe0, 0x57, 0x49, 0xa8, 0xe5, 0x0f,
- 0x7a, 0x88, 0xe7, 0x4c, 0xa0, 0xcb, 0xb5, 0x92, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0xce, 0x8a,
- 0x3b, 0x9a, 0xaf, 0xa1, 0x4c, 0x73, 0x6a, 0x5d, 0x26, 0xc6, 0xf5, 0x60, 0x5c, 0xe5, 0xe3, 0xe5,
- 0x59, 0x12, 0xc2, 0xa9, 0x31, 0xb6, 0xb6, 0xbb, 0x4f, 0xb1, 0xa0, 0x74, 0xbe, 0xcb, 0x09, 0x4a,
- 0x38, 0x45, 0xdb, 0x08, 0x04, 0x3b, 0xde, 0x31, 0x10, 0xec, 0x79, 0x18, 0x0c, 0x5b, 0xb7, 0x6a,
- 0x7e, 0xc3, 0x71, 0x3d, 0x66, 0xe8, 0xa2, 0xbd, 0x97, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x04,
- 0x03, 0x8e, 0x54, 0xe8, 0xa2, 0xfc, 0x18, 0x77, 0x4a, 0x8d, 0xcb, 0xc3, 0x3e, 0x49, 0x15, 0xae,
- 0xaa, 0x8b, 0x5e, 0x86, 0x11, 0x11, 0x47, 0x43, 0xa4, 0xb0, 0x9f, 0x34, 0xbd, 0x86, 0x2b, 0x3a,
- 0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x0c, 0x45, 0x7e, 0x9d, 0xb9, 0xbe, 0x52, 0x06, 0xf6, 0x58, 0x7e,
- 0x28, 0xda, 0x35, 0x85, 0xa6, 0xab, 0x1a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x6b, 0x7c, 0xbd, 0xb3,
- 0x6c, 0x41, 0x24, 0x14, 0x39, 0xd0, 0x4f, 0xe5, 0x59, 0x29, 0x32, 0x34, 0x73, 0x3b, 0x88, 0x9a,
- 0x58, 0x27, 0x83, 0x2e, 0xc1, 0x44, 0x33, 0x70, 0x7d, 0xb6, 0x26, 0x94, 0x82, 0x7a, 0xca, 0xcc,
- 0x0d, 0x5a, 0x4e, 0x22, 0xe0, 0x74, 0x1d, 0x16, 0x06, 0x45, 0x14, 0x4e, 0x9d, 0xe0, 0xf9, 0xcd,
- 0xb8, 0xf8, 0x81, 0x97, 0x61, 0x05, 0x45, 0x2b, 0xec, 0x24, 0xe6, 0x92, 0xb3, 0xa9, 0xe9, 0x7c,
- 0xe7, 0x7a, 0x5d, 0xc2, 0xc6, 0xd9, 0x72, 0xf5, 0x17, 0xc7, 0x14, 0x50, 0x4d, 0x4b, 0x2b, 0x4e,
- 0x1f, 0x37, 0xe1, 0xd4, 0xc9, 0x36, 0x66, 0xb2, 0x89, 0x97, 0x6c, 0xcc, 0x10, 0x18, 0xc5, 0x21,
- 0x4e, 0xd0, 0x44, 0xaf, 0xc1, 0xb8, 0x88, 0x11, 0x10, 0x0f, 0xd3, 0xa9, 0xd8, 0x95, 0x08, 0x27,
- 0x60, 0x38, 0x85, 0xcd, 0xf3, 0x8b, 0x39, 0xb7, 0xea, 0x44, 0x1c, 0x7d, 0x57, 0x5d, 0x6f, 0x2b,
- 0x9c, 0x3a, 0xcd, 0xce, 0x07, 0x91, 0x5f, 0x2c, 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x1a, 0x8c, 0x37,
- 0x03, 0x42, 0x1a, 0xec, 0x09, 0x23, 0xee, 0xb3, 0x12, 0x8f, 0x02, 0x44, 0x7b, 0x52, 0x4e, 0xc0,
- 0xf6, 0x33, 0xca, 0x70, 0x8a, 0x02, 0xba, 0x0d, 0x03, 0xfe, 0x36, 0x09, 0x36, 0x89, 0x53, 0x9b,
- 0x3a, 0xd3, 0xc6, 0xc1, 0x4d, 0x5c, 0x6e, 0xd7, 0x04, 0x6e, 0xc2, 0xfe, 0x47, 0x16, 0x77, 0xb6,
- 0xff, 0x91, 0x8d, 0xa1, 0xbf, 0x65, 0xc1, 0x09, 0xa9, 0x51, 0xab, 0x34, 0xe9, 0xa8, 0xcf, 0xfb,
- 0x5e, 0x18, 0x05, 0x3c, 0x6e, 0xcd, 0xc3, 0xf9, 0xb1, 0x5c, 0xd6, 0x72, 0x2a, 0x29, 0xe1, 0xfd,
- 0x89, 0x3c, 0x8c, 0x10, 0xe7, 0xb7, 0x48, 0x1f, 0xdd, 0x21, 0x89, 0xe4, 0x61, 0x34, 0x1b, 0x2e,
- 0xbd, 0xb1, 0xb0, 0x3a, 0xf5, 0x08, 0x0f, 0xba, 0x43, 0x37, 0x43, 0x25, 0x09, 0xc4, 0x69, 0x7c,
- 0x74, 0x01, 0x0a, 0x7e, 0x38, 0xf5, 0x68, 0x9b, 0x4c, 0xf4, 0x7e, 0xed, 0x5a, 0x85, 0xdb, 0x81,
- 0x5e, 0xab, 0xe0, 0x82, 0x1f, 0xca, 0x1c, 0x5f, 0xf4, 0xa5, 0x19, 0x4e, 0x3d, 0xc6, 0x45, 0xbd,
- 0x32, 0xc7, 0x17, 0x2b, 0xc4, 0x31, 0x1c, 0x6d, 0xc2, 0x58, 0x68, 0xbc, 0xe8, 0xc3, 0xa9, 0xb3,
- 0x6c, 0xa4, 0x1e, 0xcb, 0x9b, 0x34, 0x03, 0x5b, 0x4b, 0xbe, 0x63, 0x52, 0xc1, 0x49, 0xb2, 0x7c,
- 0x77, 0x69, 0x32, 0x85, 0x70, 0xea, 0xf1, 0x0e, 0xbb, 0x4b, 0x43, 0xd6, 0x77, 0x97, 0x4e, 0x03,
- 0x27, 0x68, 0x4e, 0x7f, 0x07, 0x4c, 0xa4, 0xd8, 0xa5, 0x83, 0xf8, 0x3c, 0x4c, 0x6f, 0xc1, 0x88,
- 0xb1, 0x24, 0x1f, 0xa8, 0x49, 0xcc, 0xef, 0x0e, 0xc2, 0xa0, 0x32, 0x55, 0x40, 0xe7, 0x4d, 0x2b,
- 0x98, 0x13, 0x49, 0x2b, 0x98, 0x81, 0xb2, 0x5f, 0x33, 0x0c, 0x5f, 0xd6, 0x32, 0x62, 0xc9, 0xe6,
- 0x1d, 0x80, 0xdd, 0x3b, 0x66, 0x69, 0xea, 0x97, 0x62, 0xd7, 0xe6, 0x34, 0x3d, 0x6d, 0x35, 0x3a,
- 0x97, 0x60, 0xc2, 0xf3, 0x19, 0x8f, 0x4e, 0x6a, 0x92, 0x01, 0x63, 0x7c, 0xd6, 0xa0, 0x1e, 0xeb,
- 0x2c, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x51, 0x4a, 0xaa, 0x90, 0x38, 0x1f, 0x85, 0x05,
- 0x94, 0xbe, 0x0d, 0xf9, 0xaf, 0x70, 0x6a, 0x3c, 0xff, 0x6d, 0xc8, 0x2b, 0x25, 0x99, 0xb1, 0x50,
- 0x32, 0x63, 0x4c, 0x63, 0xd2, 0xf4, 0x6b, 0xcb, 0x65, 0xc1, 0xe6, 0x6b, 0x51, 0xde, 0x6b, 0xcb,
- 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x43, 0x26, 0x6f, 0x9b, 0x2e, 0x97, 0xb5,
- 0x1c, 0xa3, 0xac, 0x02, 0x16, 0x15, 0x99, 0x44, 0x9c, 0xbe, 0x8d, 0x98, 0x44, 0xbc, 0xff, 0x1e,
- 0x25, 0xe2, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x3b, 0x70, 0xd4, 0x78, 0x8f, 0x2a, 0x4f, 0x35, 0xc8,
- 0x57, 0x96, 0x27, 0x90, 0xe7, 0x4e, 0x89, 0x4e, 0x1f, 0x5d, 0xce, 0xa2, 0x84, 0xb3, 0x1b, 0x40,
- 0x75, 0x98, 0xa8, 0xa6, 0x5a, 0x1d, 0xe8, 0xbe, 0x55, 0xb5, 0x2e, 0xd2, 0x2d, 0xa6, 0x09, 0xa3,
- 0x97, 0x61, 0xe0, 0x6d, 0x9f, 0x1b, 0xb6, 0x89, 0xa7, 0x89, 0x8c, 0x92, 0x32, 0xf0, 0xc6, 0xb5,
- 0x0a, 0x2b, 0xdf, 0xdf, 0x2d, 0x0d, 0x95, 0xfd, 0x9a, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x03, 0x16,
- 0x4c, 0xa7, 0x1f, 0xbc, 0xaa, 0xd3, 0x23, 0xdd, 0x77, 0xda, 0x16, 0x8d, 0x4e, 0x2f, 0xe6, 0x92,
- 0xc3, 0x6d, 0x9a, 0x42, 0x1f, 0xa6, 0xfb, 0x29, 0x74, 0xef, 0x12, 0x91, 0xa0, 0xfd, 0xe1, 0x78,
- 0x3f, 0xd1, 0xd2, 0xfd, 0xdd, 0xd2, 0x18, 0x3f, 0x19, 0xdd, 0xbb, 0x2a, 0x1e, 0x3d, 0xaf, 0x80,
- 0xbe, 0x1b, 0x8e, 0x06, 0x69, 0xd9, 0x30, 0x91, 0x4c, 0xf8, 0x93, 0xdd, 0x9c, 0xb2, 0xc9, 0x09,
- 0xc7, 0x59, 0x04, 0x71, 0x76, 0x3b, 0xf6, 0xaf, 0x59, 0x4c, 0x27, 0x20, 0xba, 0x45, 0xc2, 0x56,
- 0x3d, 0x3a, 0x04, 0x63, 0xb2, 0x45, 0x43, 0xdf, 0x7e, 0xcf, 0xd6, 0x60, 0xff, 0xd2, 0x62, 0xd6,
- 0x60, 0x87, 0xe8, 0xd7, 0xf6, 0x06, 0x0c, 0x44, 0xa2, 0x35, 0xd1, 0xf5, 0x3c, 0xcb, 0x15, 0xd9,
- 0x29, 0x66, 0x11, 0xa7, 0x1e, 0x39, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0x3f, 0xe3, 0x33, 0x20, 0x21,
- 0x87, 0xa0, 0xd6, 0x5c, 0x30, 0xd5, 0x9a, 0xa5, 0x0e, 0x5f, 0x90, 0xa3, 0xde, 0xfc, 0xa7, 0x66,
- 0xbf, 0x99, 0x70, 0xef, 0xdd, 0x6e, 0x86, 0x68, 0x7f, 0xde, 0x02, 0x88, 0x13, 0x80, 0x74, 0x91,
- 0x90, 0xf9, 0x22, 0x7d, 0xd6, 0xf8, 0x91, 0x5f, 0xf5, 0xeb, 0x42, 0xf5, 0x72, 0x32, 0xd6, 0xac,
- 0xf2, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0x23, 0xf2, 0x16, 0x63, 0x5d, 0xbf, 0x11,
- 0x8d, 0xf7, 0x4b, 0x16, 0x1c, 0xc9, 0x72, 0x92, 0xa0, 0x8f, 0x64, 0x2e, 0xe6, 0x54, 0x26, 0xa2,
- 0x6a, 0x36, 0x6f, 0x88, 0x72, 0xac, 0x30, 0xba, 0xce, 0x74, 0x7d, 0xb0, 0xe4, 0x14, 0xd7, 0x60,
- 0xa4, 0x1c, 0x10, 0x8d, 0xbf, 0x78, 0x35, 0xce, 0x9b, 0x33, 0x38, 0xf7, 0xf4, 0x81, 0x23, 0x0f,
- 0xd9, 0x5f, 0x2e, 0xc0, 0x11, 0x6e, 0xe8, 0x34, 0xbb, 0xed, 0xbb, 0xb5, 0xb2, 0x5f, 0x13, 0xae,
- 0xad, 0x6f, 0xc2, 0x70, 0x53, 0x93, 0x4d, 0xb7, 0x0b, 0xb4, 0xae, 0xcb, 0xb0, 0x63, 0x69, 0x9a,
- 0x5e, 0x8a, 0x0d, 0x5a, 0xa8, 0x06, 0xc3, 0x64, 0xdb, 0xad, 0x2a, 0x6b, 0x99, 0xc2, 0x81, 0x2f,
- 0x69, 0xd5, 0xca, 0xa2, 0x46, 0x07, 0x1b, 0x54, 0xbb, 0x36, 0x4f, 0xd6, 0x58, 0xb4, 0x9e, 0x0e,
- 0x16, 0x32, 0x3f, 0x6e, 0xc1, 0xf1, 0x9c, 0xb0, 0xec, 0xb4, 0xb9, 0xdb, 0xcc, 0xa4, 0x4c, 0x2c,
- 0x5b, 0xd5, 0x1c, 0x37, 0x34, 0xc3, 0x02, 0x8a, 0x3e, 0x0a, 0xd0, 0x8c, 0x53, 0x52, 0x76, 0x88,
- 0x5f, 0x6d, 0x44, 0xb2, 0xd5, 0x82, 0x92, 0xaa, 0xcc, 0x95, 0x1a, 0x2d, 0xfb, 0x4b, 0x3d, 0xd0,
- 0xcb, 0x0c, 0x93, 0x50, 0x19, 0xfa, 0x37, 0x79, 0xcc, 0xbc, 0xb6, 0xf3, 0x46, 0x71, 0x65, 0x10,
- 0xbe, 0x78, 0xde, 0xb4, 0x52, 0x2c, 0xc9, 0xa0, 0x15, 0x98, 0xe4, 0xe9, 0x36, 0xeb, 0x0b, 0xa4,
- 0xee, 0xec, 0x48, 0xb1, 0x6f, 0x81, 0x7d, 0xaa, 0x12, 0x7f, 0x2f, 0xa7, 0x51, 0x70, 0x56, 0x3d,
- 0xf4, 0x2a, 0x8c, 0xd2, 0x67, 0xb8, 0xdf, 0x8a, 0x24, 0x25, 0x9e, 0xdf, 0x52, 0xbd, 0x4c, 0xd6,
- 0x0c, 0x28, 0x4e, 0x60, 0xa3, 0x97, 0x61, 0xa4, 0x99, 0x12, 0x70, 0xf7, 0xc6, 0x92, 0x20, 0x53,
- 0xa8, 0x6d, 0xe2, 0x32, 0x3f, 0x89, 0x16, 0xf3, 0x0a, 0x59, 0xdb, 0x0c, 0x48, 0xb8, 0xe9, 0xd7,
- 0x6b, 0x8c, 0x03, 0xee, 0xd5, 0xfc, 0x24, 0x12, 0x70, 0x9c, 0xaa, 0x41, 0xa9, 0xac, 0x3b, 0x6e,
- 0xbd, 0x15, 0x90, 0x98, 0x4a, 0x9f, 0x49, 0x65, 0x29, 0x01, 0xc7, 0xa9, 0x1a, 0x9d, 0x25, 0xf7,
- 0xfd, 0xf7, 0x47, 0x72, 0x6f, 0xff, 0x4c, 0x01, 0x8c, 0xa9, 0x7d, 0x0f, 0xe7, 0xdd, 0x7c, 0x05,
- 0x7a, 0x36, 0x82, 0x66, 0x55, 0x18, 0xe1, 0x65, 0x7e, 0x59, 0x9c, 0xfd, 0x9f, 0x7f, 0x19, 0xfd,
- 0x8f, 0x59, 0x2d, 0xba, 0xc7, 0x8f, 0x96, 0x03, 0x9f, 0x5e, 0x72, 0x32, 0xac, 0xa6, 0x72, 0x47,
- 0xea, 0x97, 0x81, 0x35, 0xda, 0x04, 0xa0, 0x16, 0x3e, 0x15, 0x9c, 0x82, 0x61, 0xaf, 0x56, 0x11,
- 0xe1, 0x73, 0x24, 0x15, 0x74, 0x01, 0x86, 0x44, 0x2a, 0x44, 0xe6, 0x35, 0xc3, 0x37, 0x13, 0xb3,
- 0xaf, 0x5b, 0x88, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x60, 0x01, 0x26, 0x33, 0xdc, 0x1e, 0xf9, 0x35,
- 0xb2, 0xe1, 0x86, 0x51, 0xb0, 0x93, 0xbc, 0x9c, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x9e, 0x55, 0xfc,
- 0xa2, 0x4a, 0x5e, 0x4e, 0xc2, 0xad, 0x48, 0x40, 0x0f, 0x98, 0xaa, 0xff, 0x0c, 0xf4, 0xb4, 0x42,
- 0x22, 0x63, 0xdd, 0xab, 0x6b, 0x9b, 0x29, 0xec, 0x19, 0x84, 0x3e, 0x01, 0x37, 0x94, 0x16, 0x5a,
- 0x7b, 0x02, 0x72, 0x3d, 0x34, 0x87, 0xd1, 0xce, 0x45, 0xc4, 0x73, 0xbc, 0x48, 0x3c, 0x14, 0xe3,
- 0x18, 0xc8, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xc8, 0x75, 0x84, 0xa6, 0x5d, 0x6f,
- 0xf8, 0x9e, 0x1b, 0xf9, 0xca, 0x70, 0x91, 0xc7, 0x3d, 0x26, 0xcd, 0xcd, 0x15, 0x51, 0x8e, 0x15,
- 0x06, 0x3a, 0x0b, 0xbd, 0x4c, 0x28, 0x9e, 0x4c, 0x83, 0x86, 0xe7, 0x16, 0x78, 0x44, 0x49, 0x0e,
- 0xd6, 0x6e, 0xf5, 0x62, 0xdb, 0x5b, 0xfd, 0x11, 0xca, 0xc1, 0xf8, 0xf5, 0xe4, 0x85, 0x42, 0xbb,
- 0xeb, 0xfb, 0x75, 0xcc, 0x80, 0xe8, 0x31, 0x31, 0x5e, 0x09, 0x4b, 0x3d, 0xec, 0xd4, 0xfc, 0x50,
- 0x1b, 0xb4, 0x27, 0xa0, 0x7f, 0x8b, 0xec, 0x04, 0xae, 0xb7, 0x91, 0xb4, 0xe0, 0xbc, 0xc2, 0x8b,
- 0xb1, 0x84, 0x9b, 0x59, 0xbf, 0xfb, 0xef, 0x47, 0xd6, 0x6f, 0x7d, 0x05, 0x0c, 0x74, 0x64, 0x4f,
- 0x7e, 0xa8, 0x08, 0x63, 0x78, 0x6e, 0xe1, 0xfd, 0x89, 0xb8, 0x9e, 0x9e, 0x88, 0xfb, 0x91, 0x1c,
- 0xfb, 0x60, 0xb3, 0xf1, 0xcb, 0x16, 0x8c, 0xb1, 0x84, 0x8c, 0x22, 0x8a, 0x89, 0xeb, 0x7b, 0x87,
- 0xf0, 0x14, 0x78, 0x04, 0x7a, 0x03, 0xda, 0xa8, 0x98, 0x41, 0xb5, 0xc7, 0x59, 0x4f, 0x30, 0x87,
- 0xa1, 0x93, 0xd0, 0xc3, 0xba, 0x40, 0x27, 0x6f, 0x98, 0x1f, 0xc1, 0x0b, 0x4e, 0xe4, 0x60, 0x56,
- 0xca, 0xe2, 0x29, 0x62, 0xd2, 0xac, 0xbb, 0xbc, 0xd3, 0xb1, 0xc9, 0xc2, 0xbb, 0x23, 0x44, 0x4a,
- 0x66, 0xd7, 0xde, 0x59, 0x3c, 0xc5, 0x6c, 0x92, 0xed, 0x9f, 0xd9, 0x7f, 0x5e, 0x80, 0xd3, 0x99,
- 0xf5, 0xba, 0x8e, 0xa7, 0xd8, 0xbe, 0xf6, 0x83, 0x4c, 0xdf, 0x56, 0x3c, 0x44, 0xfb, 0xf8, 0x9e,
- 0x6e, 0xb9, 0xff, 0xde, 0x2e, 0xc2, 0x1c, 0x66, 0x0e, 0xd9, 0xbb, 0x24, 0xcc, 0x61, 0x66, 0xdf,
- 0x72, 0xc4, 0x04, 0x7f, 0x53, 0xc8, 0xf9, 0x16, 0x26, 0x30, 0x38, 0x47, 0xcf, 0x19, 0x06, 0x0c,
- 0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a, 0x85, 0xb1, 0x86, 0xeb, 0xd1, 0xc3,
- 0x67, 0xc7, 0x64, 0xc5, 0x95, 0x2e, 0x63, 0xc5, 0x04, 0xe3, 0x24, 0x3e, 0x72, 0xb5, 0x10, 0x88,
- 0xfc, 0xeb, 0x5e, 0x3e, 0xd0, 0xae, 0x9b, 0x31, 0xcd, 0x39, 0xd4, 0x28, 0x66, 0x84, 0x43, 0x5c,
- 0xd1, 0xe4, 0x44, 0xc5, 0xee, 0xe5, 0x44, 0xc3, 0xd9, 0x32, 0xa2, 0xe9, 0x97, 0x61, 0xe4, 0x9e,
- 0x75, 0x23, 0xf6, 0x37, 0x8a, 0xf0, 0x50, 0x9b, 0x6d, 0xcf, 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3,
- 0x3e, 0x35, 0x0f, 0x65, 0x38, 0xb2, 0xde, 0xaa, 0xd7, 0x77, 0x98, 0x23, 0x18, 0xa9, 0x49, 0x0c,
- 0xc1, 0x53, 0x4a, 0xe1, 0xc8, 0x91, 0xa5, 0x0c, 0x1c, 0x9c, 0x59, 0x93, 0x3e, 0xb1, 0xe8, 0x4d,
- 0xb2, 0xa3, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x82, 0x09, 0x67, 0xdb,
- 0x71, 0x79, 0xfa, 0x0b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a, 0x36, 0x89, 0x80, 0xd3, 0x75,
- 0xd0, 0xeb, 0x80, 0xfc, 0x5b, 0xcc, 0xb9, 0xa4, 0x76, 0x89, 0x78, 0x42, 0xeb, 0xce, 0xe6, 0xae,
- 0x18, 0x1f, 0x09, 0xd7, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x18, 0x5f, 0x5f, 0x7e, 0x30, 0xbe,
- 0xf6, 0xe7, 0x62, 0xc7, 0xcc, 0x81, 0x6f, 0xc1, 0xc8, 0x41, 0x2d, 0xa6, 0x9f, 0x80, 0xfe, 0x40,
- 0xe4, 0x64, 0x4f, 0x78, 0x5d, 0xcb, 0x8c, 0xd5, 0x12, 0x6e, 0xff, 0x17, 0x0b, 0x94, 0x2c, 0xd9,
- 0x8c, 0xbb, 0xfd, 0x32, 0x33, 0xff, 0xe6, 0x52, 0x70, 0x2d, 0xd4, 0xd6, 0x51, 0xcd, 0xfc, 0x3b,
- 0x06, 0x62, 0x13, 0x97, 0x2f, 0xb7, 0x30, 0x8e, 0xf0, 0x60, 0x3c, 0x20, 0x44, 0x58, 0x50, 0x85,
- 0x81, 0x3e, 0x06, 0xfd, 0x35, 0x77, 0xdb, 0x0d, 0x85, 0x1c, 0xed, 0xc0, 0x7a, 0xbb, 0xf8, 0xfb,
- 0x16, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x62, 0x81, 0xd2, 0x4b, 0x5e, 0x26, 0x4e, 0x3d, 0xda,
- 0x44, 0xaf, 0x01, 0x48, 0x0a, 0x4a, 0xf6, 0x26, 0xad, 0xa5, 0x00, 0x2b, 0xc8, 0xbe, 0xf1, 0x0f,
- 0x6b, 0x75, 0xd0, 0xab, 0xd0, 0xb7, 0xc9, 0x68, 0x89, 0x6f, 0x3b, 0xab, 0x54, 0x5d, 0xac, 0x74,
- 0x7f, 0xb7, 0x74, 0xc4, 0x6c, 0x53, 0xde, 0x62, 0xbc, 0x96, 0xfd, 0x43, 0x85, 0x78, 0x4e, 0xdf,
- 0x68, 0xf9, 0x91, 0x73, 0x08, 0x9c, 0xc8, 0x25, 0x83, 0x13, 0x79, 0x2c, 0x7b, 0xa1, 0x6a, 0x5d,
- 0xca, 0xe5, 0x40, 0xae, 0x25, 0x38, 0x90, 0xc7, 0x3b, 0x93, 0x6a, 0xcf, 0x79, 0xfc, 0x73, 0x0b,
- 0x26, 0x0c, 0xfc, 0x43, 0xb8, 0x00, 0x97, 0xcc, 0x0b, 0xf0, 0xe1, 0x8e, 0xdf, 0x90, 0x73, 0xf1,
- 0x7d, 0x7f, 0x31, 0xd1, 0x77, 0x76, 0xe1, 0xbd, 0x0d, 0x3d, 0x9b, 0x4e, 0x50, 0x13, 0xef, 0xfa,
- 0xf3, 0x5d, 0x8d, 0xf5, 0xcc, 0x65, 0x27, 0x10, 0x96, 0x16, 0x4f, 0xcb, 0x51, 0xa7, 0x45, 0x1d,
- 0xad, 0x2c, 0x58, 0x53, 0xe8, 0x22, 0xf4, 0x85, 0x55, 0xbf, 0xa9, 0xfc, 0xe4, 0x58, 0x3a, 0xed,
- 0x0a, 0x2b, 0xd9, 0xdf, 0x2d, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x13, 0x46, 0xd8,
- 0x2f, 0x65, 0xf6, 0x58, 0xcc, 0x97, 0xc0, 0x54, 0x74, 0x44, 0x6e, 0x13, 0x6c, 0x14, 0x61, 0x93,
- 0xd4, 0xf4, 0x06, 0x0c, 0xaa, 0xcf, 0x7a, 0xa0, 0xda, 0xfa, 0xff, 0x58, 0x84, 0xc9, 0x8c, 0x35,
- 0x87, 0x42, 0x63, 0x26, 0x2e, 0x74, 0xb9, 0x54, 0xdf, 0xe1, 0x5c, 0x84, 0xec, 0x01, 0x58, 0x13,
- 0x6b, 0xab, 0xeb, 0x46, 0xaf, 0x87, 0x24, 0xd9, 0x28, 0x2d, 0xea, 0xdc, 0x28, 0x6d, 0xec, 0xd0,
- 0x86, 0x9a, 0x36, 0xa4, 0x7a, 0xfa, 0x40, 0xe7, 0xf4, 0x37, 0x7b, 0xe0, 0x48, 0x56, 0x0c, 0x69,
- 0xf4, 0x19, 0x10, 0xe9, 0xff, 0xc5, 0xb4, 0x3e, 0xdf, 0x6e, 0x84, 0xf5, 0x9a, 0x33, 0xcc, 0x17,
- 0x4c, 0x84, 0x6e, 0x9d, 0x91, 0xc7, 0x11, 0x2f, 0xec, 0x38, 0xcc, 0xa2, 0x4d, 0x16, 0x52, 0x49,
- 0xdc, 0x9e, 0xf2, 0xf8, 0xf8, 0x50, 0xd7, 0x1d, 0x10, 0xf7, 0x6f, 0x98, 0x30, 0xa9, 0x92, 0xc5,
- 0x9d, 0x4d, 0xaa, 0x64, 0xcb, 0x68, 0x19, 0xfa, 0xaa, 0xdc, 0x56, 0xa7, 0xd8, 0xf9, 0x08, 0xe3,
- 0x86, 0x3a, 0xea, 0x00, 0x16, 0x06, 0x3a, 0x82, 0xc0, 0xb4, 0x0b, 0x43, 0xda, 0xc0, 0x3c, 0xd0,
- 0xc5, 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xe8, 0x02, 0xfa, 0x31, 0xed, 0xee, 0x17, 0xe7,
- 0xc1, 0x07, 0x0d, 0xde, 0xe9, 0x64, 0xc2, 0x05, 0x2f, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x31, 0x63,
- 0x9e, 0xe7, 0xa6, 0x4e, 0x32, 0x2f, 0xfc, 0xf6, 0x71, 0xce, 0xed, 0x1f, 0xb7, 0x20, 0xe1, 0x24,
- 0xa5, 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x0c, 0xf4, 0x04, 0x7e, 0x9d, 0x24, 0x53, 0xd3, 0x63,
- 0xbf, 0x4e, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x16, 0x62, 0x0d, 0xeb, 0x0f, 0x74, 0xf1, 0xf4, 0x7e,
- 0x04, 0x7a, 0xeb, 0x64, 0x9b, 0xd4, 0x93, 0x19, 0x44, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xf6, 0x2f,
- 0xf7, 0xc0, 0xa9, 0xb6, 0x91, 0xd7, 0x28, 0x83, 0xb9, 0xe1, 0x44, 0xe4, 0xb6, 0xb3, 0x93, 0xcc,
- 0x9c, 0x77, 0x89, 0x17, 0x63, 0x09, 0x67, 0xce, 0xc8, 0x3c, 0x93, 0x4c, 0x42, 0x38, 0x2c, 0x12,
- 0xc8, 0x08, 0xa8, 0x29, 0x6c, 0x2c, 0xde, 0x0f, 0x61, 0xe3, 0xb3, 0x00, 0x61, 0x58, 0xe7, 0x06,
- 0x97, 0x35, 0xe1, 0xe5, 0x1c, 0x67, 0x1c, 0xaa, 0x5c, 0x15, 0x10, 0xac, 0x61, 0xa1, 0x05, 0x18,
- 0x6f, 0x06, 0x7e, 0xc4, 0x65, 0xed, 0x0b, 0xdc, 0x26, 0xb9, 0xd7, 0x0c, 0x7a, 0x55, 0x4e, 0xc0,
- 0x71, 0xaa, 0x06, 0x7a, 0x01, 0x86, 0x44, 0x20, 0xac, 0xb2, 0xef, 0xd7, 0x85, 0x78, 0x4f, 0x99,
- 0xe9, 0x56, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0x80, 0xdf, 0x9f, 0x59, 0x8d, 0x0b, 0xf1,
- 0x35, 0xbc, 0x44, 0xd0, 0xfc, 0x81, 0xae, 0x82, 0xe6, 0xc7, 0x02, 0xcf, 0xc1, 0xae, 0xf5, 0xc9,
- 0xd0, 0x51, 0x44, 0xf8, 0x95, 0x1e, 0x98, 0x14, 0x0b, 0xe7, 0x41, 0x2f, 0x97, 0xeb, 0xe9, 0xe5,
- 0x72, 0x3f, 0x44, 0xa2, 0xef, 0xaf, 0x99, 0xc3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x43, 0xa2,
- 0xff, 0x2f, 0x37, 0xf5, 0xe8, 0x0b, 0xb9, 0x3c, 0x69, 0x1c, 0x51, 0xfb, 0x9d, 0x25, 0x21, 0xb5,
- 0xff, 0x93, 0x05, 0x0f, 0x77, 0xa4, 0x88, 0x16, 0x61, 0x90, 0x31, 0xba, 0xda, 0xbb, 0xf8, 0x71,
- 0xe5, 0xb3, 0x20, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa6, 0x72, 0xbc, 0x3e, 0x91, 0x91,
- 0xe3, 0xf5, 0xa8, 0x31, 0x3c, 0xf7, 0x98, 0xe4, 0xf5, 0x0b, 0xf4, 0xc6, 0x31, 0x7d, 0x12, 0x3f,
- 0x64, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x1a, 0x8c, 0xb3, 0x08,
- 0x99, 0xcc, 0x83, 0x46, 0x38, 0x33, 0x16, 0x62, 0x2b, 0xf9, 0xab, 0x09, 0x18, 0x4e, 0x61, 0xdb,
- 0x7f, 0x5a, 0x84, 0x3e, 0xbe, 0xfd, 0x0e, 0xe1, 0xe1, 0xfb, 0x14, 0x0c, 0xba, 0x8d, 0x46, 0x8b,
- 0xa7, 0xed, 0xec, 0x8d, 0x6d, 0xae, 0x97, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x49, 0x68, 0x12, 0xda,
- 0x04, 0xe1, 0xe6, 0x1d, 0x9f, 0x59, 0x70, 0x22, 0x87, 0x73, 0x71, 0xea, 0x9e, 0x8d, 0x75, 0x0e,
- 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x06, 0x2d, 0x13, 0x99, 0x1a, 0x9e, 0x6c, 0x43, 0xad,
- 0xa2, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0x33, 0xc6, 0x4d, 0x3f, 0x9d,
- 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x7e, 0x11, 0x06, 0x15, 0xf1, 0x4e, 0x72, 0xc5, 0x61,
- 0x9d, 0x61, 0xfb, 0x08, 0x8c, 0x25, 0xfa, 0x76, 0x20, 0xb1, 0xe4, 0xaf, 0x58, 0x30, 0xc6, 0x3b,
- 0xb3, 0xe8, 0x6d, 0x8b, 0xdb, 0xe0, 0x2e, 0x1c, 0xa9, 0x67, 0x9c, 0xca, 0x62, 0xfa, 0xbb, 0x3f,
- 0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xe7, 0xe8, 0x8e, 0xa3, 0xa7, 0xae, 0x53,
- 0x17, 0xd1, 0x36, 0x86, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f, 0xb4, 0x60, 0x82, 0xf7,
- 0xfc, 0x0a, 0xd9, 0x51, 0x67, 0xd3, 0xb7, 0xb2, 0xef, 0x22, 0x61, 0x74, 0x21, 0x27, 0x61, 0xb4,
- 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, 0x65, 0x0b, 0xc4, 0x0a, 0x39, 0x04, 0x49, 0xcb, 0x77, 0x98,
- 0x92, 0x96, 0xe9, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x6b, 0x0b, 0xc6, 0x39, 0x42, 0x6c, 0x05,
- 0xf1, 0x2d, 0x9d, 0x87, 0x39, 0xf3, 0x8b, 0x32, 0xcd, 0x5a, 0xaf, 0x90, 0x9d, 0x35, 0xbf, 0xec,
- 0x44, 0x9b, 0xd9, 0x1f, 0x65, 0x4c, 0x56, 0x4f, 0xdb, 0xc9, 0xaa, 0xc9, 0x0d, 0x64, 0x24, 0x26,
- 0xec, 0x20, 0x00, 0x3e, 0x68, 0x62, 0x42, 0xfb, 0xcf, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0x71, 0xa3,
- 0xec, 0x10, 0x2b, 0xd5, 0x2e, 0xba, 0xf8, 0x68, 0x52, 0x10, 0xac, 0x61, 0xdd, 0x97, 0xe1, 0x49,
- 0x98, 0xb2, 0x14, 0x3b, 0x9b, 0xb2, 0x1c, 0x60, 0x44, 0xbf, 0xdc, 0x0f, 0x49, 0x9f, 0x49, 0x74,
- 0x03, 0x86, 0xab, 0x4e, 0xd3, 0xb9, 0xe5, 0xd6, 0xdd, 0xc8, 0x25, 0x61, 0x3b, 0x3b, 0xb7, 0x79,
- 0x0d, 0x4f, 0x18, 0x1f, 0x68, 0x25, 0xd8, 0xa0, 0x83, 0x66, 0x00, 0x9a, 0x81, 0xbb, 0xed, 0xd6,
- 0xc9, 0x06, 0x13, 0x08, 0xb1, 0xf8, 0x3e, 0xdc, 0xe8, 0x4e, 0x96, 0x62, 0x0d, 0x23, 0x23, 0xf4,
- 0x46, 0xf1, 0x01, 0x87, 0xde, 0x80, 0x43, 0x0b, 0xbd, 0xd1, 0x73, 0xa0, 0xd0, 0x1b, 0x03, 0x07,
- 0x0e, 0xbd, 0xd1, 0xdb, 0x55, 0xe8, 0x0d, 0x0c, 0xc7, 0x24, 0xef, 0x49, 0xff, 0x2f, 0xb9, 0x75,
- 0x22, 0x1e, 0x1c, 0x3c, 0x28, 0xd1, 0xf4, 0xde, 0x6e, 0xe9, 0x18, 0xce, 0xc4, 0xc0, 0x39, 0x35,
- 0xd1, 0x47, 0x61, 0xca, 0xa9, 0xd7, 0xfd, 0xdb, 0x6a, 0x52, 0x17, 0xc3, 0xaa, 0x53, 0xe7, 0xca,
- 0xa5, 0x7e, 0x46, 0xf5, 0xe4, 0xde, 0x6e, 0x69, 0x6a, 0x36, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf,
- 0xc0, 0x60, 0x33, 0xf0, 0xab, 0x2b, 0x9a, 0x63, 0xf7, 0x69, 0x3a, 0x80, 0x65, 0x59, 0xb8, 0xbf,
- 0x5b, 0x1a, 0x51, 0x7f, 0xd8, 0x85, 0x1f, 0x57, 0xc8, 0x88, 0x6a, 0x31, 0xf4, 0xa0, 0xa3, 0x5a,
- 0x0c, 0xdf, 0xef, 0xa8, 0x16, 0x5b, 0x30, 0x59, 0x21, 0x81, 0xeb, 0xd4, 0xdd, 0xbb, 0x94, 0x27,
- 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x06, 0x89, 0x53, 0xbf, 0xab, 0xe0, 0xdb, 0x9a, 0x5c, 0x46, 0x9e,
- 0xf2, 0x31, 0x21, 0xfb, 0x7f, 0x5b, 0xd0, 0x2f, 0xfc, 0x30, 0x0f, 0x81, 0x33, 0x9d, 0x35, 0x54,
- 0x32, 0xa5, 0xec, 0x49, 0x61, 0x9d, 0xc9, 0x55, 0xc6, 0x2c, 0x27, 0x94, 0x31, 0x0f, 0xb7, 0x23,
- 0xd2, 0x5e, 0x0d, 0xf3, 0x77, 0x8b, 0xf4, 0x85, 0x60, 0x44, 0x04, 0x78, 0xf0, 0x43, 0xb0, 0x0a,
- 0xfd, 0xa1, 0xf0, 0x48, 0x2f, 0xe4, 0xfb, 0xf2, 0x24, 0x27, 0x31, 0xb6, 0x81, 0x14, 0x3e, 0xe8,
- 0x92, 0x48, 0xa6, 0xab, 0x7b, 0xf1, 0x01, 0xba, 0xba, 0x77, 0x8a, 0x99, 0xd0, 0x73, 0x3f, 0x62,
- 0x26, 0xd8, 0x5f, 0x63, 0xb7, 0xb3, 0x5e, 0x7e, 0x08, 0x8c, 0xdb, 0x25, 0xf3, 0x1e, 0xb7, 0xdb,
- 0xac, 0x2c, 0xd1, 0xa9, 0x1c, 0x06, 0xee, 0x97, 0x2c, 0x38, 0x95, 0xf1, 0x55, 0x1a, 0x37, 0xf7,
- 0x34, 0x0c, 0x38, 0xad, 0x9a, 0xab, 0xf6, 0xb2, 0xa6, 0x2d, 0x9e, 0x15, 0xe5, 0x58, 0x61, 0xa0,
- 0x79, 0x98, 0x20, 0x77, 0x9a, 0x2e, 0x57, 0xc3, 0xeb, 0xa6, 0xe3, 0x45, 0xee, 0xbc, 0xbb, 0x98,
- 0x04, 0xe2, 0x34, 0xbe, 0x0a, 0x89, 0x56, 0xcc, 0x0d, 0x89, 0xf6, 0x0b, 0x16, 0x0c, 0x29, 0x9f,
- 0xec, 0x07, 0x3e, 0xda, 0xaf, 0x99, 0xa3, 0xfd, 0x50, 0x9b, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x28,
- 0xa8, 0xfe, 0x96, 0xfd, 0x20, 0xea, 0x82, 0x4b, 0xbc, 0x77, 0xb7, 0x97, 0x0b, 0x30, 0xe4, 0x34,
- 0x9b, 0x12, 0x20, 0xed, 0x17, 0x59, 0x2a, 0x85, 0xb8, 0x18, 0xeb, 0x38, 0xca, 0x0b, 0xa7, 0x98,
- 0xeb, 0x85, 0x53, 0x03, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37,
- 0xad, 0xc8, 0xad, 0xcf, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0xcc, 0xb2, 0x17, 0x5d, 0x0b, 0xf8, 0x33,
- 0x55, 0x0b, 0x2a, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x8c, 0x3f, 0xc2, 0xda, 0xe8, 0x35, 0x0d, 0x61,
- 0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x8b, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0x80, 0x7a, 0x7f,
- 0x3e, 0xac, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf4, 0xb0, 0x7d, 0xed, 0x0f, 0x7b, 0xda, 0xb0, 0xee,
- 0xcf, 0x1a, 0xc7, 0xf6, 0x43, 0x1f, 0x4f, 0x19, 0x37, 0x3d, 0xd3, 0xe1, 0xd6, 0x38, 0x80, 0x39,
- 0x13, 0xcb, 0xab, 0xc6, 0xb2, 0x4e, 0x2d, 0x97, 0xc5, 0xbe, 0xd0, 0xf2, 0xaa, 0x09, 0x00, 0x8e,
- 0x71, 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x53, 0x28, 0x0e, 0xbf, 0xad, 0xb0, 0x43, 0xac, 0x61, 0xa0,
- 0xf3, 0x42, 0x68, 0xc1, 0x75, 0x0f, 0x0f, 0x25, 0x84, 0x16, 0x72, 0xb8, 0x34, 0x49, 0xd3, 0x05,
- 0x18, 0x22, 0x77, 0x22, 0x12, 0x78, 0x4e, 0x9d, 0xb6, 0xd0, 0x1b, 0x47, 0x8c, 0x5d, 0x8c, 0x8b,
- 0xb1, 0x8e, 0x83, 0xd6, 0x60, 0x2c, 0xe4, 0xb2, 0x3c, 0x95, 0xf4, 0x81, 0xcb, 0x44, 0x9f, 0x54,
- 0xde, 0xf0, 0x26, 0x78, 0x9f, 0x15, 0xf1, 0xd3, 0x49, 0xc6, 0x08, 0x49, 0x92, 0x40, 0xaf, 0xc2,
- 0x68, 0xdd, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x6c, 0x7c, 0x06, 0xcc, 0xec, 0xfc, 0x57,
- 0x0d, 0x28, 0x4e, 0x60, 0x53, 0x06, 0x51, 0x2f, 0x11, 0x89, 0x4a, 0x1c, 0x6f, 0x83, 0x84, 0x53,
- 0x83, 0xec, 0xab, 0x18, 0x83, 0x78, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x17, 0x61, 0x58, 0x7e,
- 0xbe, 0x16, 0x52, 0x27, 0x76, 0x68, 0xd2, 0x60, 0xd8, 0xc0, 0x44, 0x21, 0x1c, 0x95, 0xff, 0xd7,
- 0x02, 0x67, 0x7d, 0xdd, 0xad, 0x8a, 0x38, 0x13, 0xdc, 0xf9, 0xfb, 0x23, 0xd2, 0xd3, 0x74, 0x31,
- 0x0b, 0x69, 0x7f, 0xb7, 0x74, 0x52, 0x8c, 0x5a, 0x26, 0x1c, 0x67, 0xd3, 0x46, 0x2b, 0x30, 0xc9,
- 0x6d, 0x60, 0xe6, 0x37, 0x49, 0x75, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x97, 0xd3,
- 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0xa6, 0x9a, 0xad, 0x5b, 0x75, 0x37, 0xdc, 0x5c, 0xf5, 0x23,
- 0x66, 0x42, 0x36, 0x5b, 0xab, 0x05, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x61, 0x90, 0xca,
- 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x0b, 0x47, 0x13, 0x0b, 0x41, 0xc4, 0x33, 0x19, 0xcd, 0x4f,
- 0xf9, 0x54, 0xc9, 0xaa, 0x20, 0x42, 0x03, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x25, 0x00, 0xb7,
- 0xb9, 0xe4, 0x34, 0xdc, 0x3a, 0x7d, 0x8e, 0x4e, 0xb2, 0x35, 0x42, 0x9f, 0x26, 0xb0, 0x5c, 0x96,
- 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc2, 0xa8, 0xf8, 0xb7, 0x23, 0xa6,
- 0x74, 0x42, 0x65, 0x07, 0x1d, 0x95, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, 0x80,
- 0x53, 0x32, 0x35, 0xa9, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xcf, 0xd2, 0x00, 0xf7, 0x29, 0x9a,
- 0x6d, 0x87, 0x88, 0xdb, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xd1, 0x38, 0x12,
- 0xe6, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0x47, 0x5d, 0x2f, 0x6b, 0x55, 0x1f, 0x63, 0x84,
- 0x3e, 0xcc, 0x9d, 0xe5, 0xdb, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0x96, 0x61, 0x32, 0xe2,
- 0x05, 0x0b, 0x6e, 0xc8, 0xd3, 0xb8, 0xd0, 0x67, 0xdf, 0x71, 0xd6, 0xdc, 0x71, 0xba, 0x9a, 0xd7,
- 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0x9d, 0x19, 0x80, 0x7e, 0xdd, 0xa2, 0xb5, 0x35, 0x46, 0x1f, 0x7d,
- 0x0a, 0x86, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xd9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, 0x41,
- 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x35, 0x23, 0xc8, 0xc5, 0xf9, 0xee, 0x98, 0xa2, 0xee,
- 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x15, 0x06, 0xaa, 0x75, 0x97, 0x78, 0xd1, 0x72, 0xb9,
- 0x5d, 0x80, 0xd2, 0x79, 0x81, 0x23, 0xb6, 0xa2, 0xc8, 0xbe, 0xc4, 0xcb, 0xb0, 0xa2, 0x60, 0x5f,
- 0x84, 0xa1, 0x4a, 0x9d, 0x90, 0x26, 0xf7, 0xe3, 0x42, 0x4f, 0xb0, 0x87, 0x09, 0x63, 0x2d, 0x2d,
- 0xc6, 0x5a, 0xea, 0x6f, 0x0e, 0xc6, 0x54, 0x4a, 0xb8, 0xfd, 0xdb, 0x05, 0x28, 0x75, 0x48, 0x02,
- 0x96, 0xd0, 0xb7, 0x59, 0x5d, 0xe9, 0xdb, 0x66, 0x61, 0x2c, 0xfe, 0xa7, 0x8b, 0xf2, 0x94, 0x31,
- 0xf4, 0x0d, 0x13, 0x8c, 0x93, 0xf8, 0x5d, 0xfb, 0xb5, 0xe8, 0x2a, 0xbb, 0x9e, 0x8e, 0x9e, 0x59,
- 0x86, 0xaa, 0xbe, 0xb7, 0xfb, 0xb7, 0x77, 0xae, 0xda, 0xd5, 0xfe, 0x5a, 0x01, 0x8e, 0xaa, 0x21,
- 0x7c, 0xef, 0x0e, 0xdc, 0xf5, 0xf4, 0xc0, 0xdd, 0x07, 0xa5, 0xb5, 0x7d, 0x0d, 0xfa, 0x78, 0xd4,
- 0xd4, 0x2e, 0x78, 0xfe, 0x47, 0xcc, 0xe0, 0xf4, 0x8a, 0xcd, 0x34, 0x02, 0xd4, 0xff, 0x80, 0x05,
- 0x63, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0x7b, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0x33, 0xd0,
- 0xb3, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xd9, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0x1f, 0x59, 0xd0,
- 0xbb, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x9b, 0xef, 0x42, 0x2f, 0x40,
- 0x1f, 0x59, 0x5f, 0x27, 0xd5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0xd1, 0xb7, 0xc8, 0x4a, 0x29, 0x13,
- 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x37, 0x61, 0x30, 0x72, 0x1b, 0x64, 0xb6, 0x56, 0x13,
- 0x36, 0x01, 0xf7, 0x10, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0x8b, 0x05, 0x80, 0x38,
- 0x14, 0x5c, 0xa7, 0x4f, 0x9c, 0x4b, 0x69, 0x8b, 0xcf, 0x66, 0x68, 0x8b, 0x51, 0x4c, 0x30, 0x43,
- 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd5, 0x30, 0xf5, 0x1c, 0x64, 0x98, 0xe6, 0x61, 0x22, 0x0e, 0x65,
- 0x67, 0x46, 0xf2, 0x64, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x33, 0x2a, 0xa2,
- 0x97, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x30, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, 0xab,
- 0x0e, 0xff, 0x29, 0x0b, 0x8e, 0x24, 0xdb, 0x61, 0x7e, 0xf7, 0x9f, 0xb7, 0xe0, 0x68, 0x9c, 0x03,
- 0x27, 0x6d, 0x82, 0xf0, 0x7c, 0xdb, 0x28, 0x65, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, 0x91,
- 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xab, 0x07, 0xa6, 0xf2, 0xc2, 0x9b, 0x31, 0x4f, 0x23, 0xe7, 0x4e,
- 0x65, 0x8b, 0xdc, 0x16, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0x4c, 0x7b, 0x54,
- 0xe8, 0x32, 0xed, 0xd1, 0x26, 0x4c, 0xdc, 0xde, 0x24, 0xde, 0x75, 0x2f, 0x74, 0x22, 0x37, 0x5c,
- 0x77, 0x99, 0x02, 0x9d, 0xaf, 0x1b, 0x99, 0xba, 0x7f, 0xe2, 0x66, 0x12, 0x61, 0x7f, 0xb7, 0x74,
- 0xca, 0x28, 0x88, 0xbb, 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0xce, 0x1a, 0xd5, 0xf3, 0x80, 0xb3,
- 0x46, 0x35, 0x5c, 0x61, 0x76, 0x23, 0xdd, 0x48, 0xd8, 0xb3, 0x75, 0x45, 0x95, 0x62, 0x0d, 0x03,
- 0x7d, 0x02, 0x90, 0x9e, 0xf6, 0xcf, 0x88, 0x2e, 0xfb, 0xcc, 0xde, 0x6e, 0x09, 0xad, 0xa6, 0xa0,
- 0xfb, 0xbb, 0xa5, 0x49, 0x5a, 0xba, 0xec, 0xd1, 0xe7, 0x6f, 0x1c, 0x92, 0x2f, 0x83, 0x10, 0xba,
- 0x09, 0xe3, 0xb4, 0x94, 0xed, 0x28, 0x19, 0xba, 0x96, 0x3f, 0x59, 0x9f, 0xda, 0xdb, 0x2d, 0x8d,
- 0xaf, 0x26, 0x60, 0x79, 0xa4, 0x53, 0x44, 0x32, 0x92, 0x47, 0x0d, 0x74, 0x9b, 0x3c, 0xca, 0xfe,
- 0xbc, 0x05, 0x27, 0xe8, 0x05, 0x57, 0xbb, 0x9a, 0xa3, 0x45, 0x77, 0x9a, 0x2e, 0xd7, 0xd3, 0x88,
- 0xab, 0x86, 0xc9, 0xea, 0xca, 0xcb, 0x5c, 0x4b, 0xa3, 0xa0, 0xf4, 0x84, 0xdf, 0x72, 0xbd, 0x5a,
- 0xf2, 0x84, 0xbf, 0xe2, 0x7a, 0x35, 0xcc, 0x20, 0xea, 0xca, 0x2a, 0xe6, 0x46, 0xa9, 0xff, 0x0a,
- 0xdd, 0xab, 0xb4, 0x2f, 0xdf, 0xd2, 0x6e, 0xa0, 0xa7, 0x74, 0x9d, 0xaa, 0x30, 0x9f, 0xcc, 0xd5,
- 0xa7, 0x7e, 0xce, 0x02, 0xe1, 0xfd, 0xde, 0xc5, 0x9d, 0xfc, 0x26, 0x0c, 0x6f, 0xa7, 0x53, 0xa2,
- 0x9e, 0xc9, 0x0f, 0x07, 0x20, 0x12, 0xa1, 0x2a, 0x16, 0xdd, 0x48, 0x7f, 0x6a, 0xd0, 0xb2, 0x6b,
- 0x20, 0xa0, 0x0b, 0x84, 0x69, 0x35, 0x3a, 0xf7, 0xe6, 0x59, 0x80, 0x1a, 0xc3, 0x65, 0x79, 0xd2,
- 0x0b, 0x26, 0xc7, 0xb5, 0xa0, 0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xae, 0x08, 0x43, 0x32, 0x05, 0x67,
- 0xcb, 0xeb, 0x46, 0xf6, 0x78, 0xa0, 0x9c, 0xfc, 0xe8, 0x2d, 0x98, 0x08, 0x48, 0xb5, 0x15, 0x84,
- 0xee, 0x36, 0x91, 0x60, 0xb1, 0x49, 0x66, 0x78, 0x92, 0x84, 0x04, 0x70, 0x9f, 0x85, 0xc8, 0x4a,
- 0x14, 0x32, 0xa5, 0x71, 0x9a, 0x10, 0x3a, 0x0f, 0x83, 0x4c, 0xf4, 0x5e, 0x8e, 0x05, 0xc2, 0x4a,
- 0xf0, 0xb5, 0x22, 0x01, 0x38, 0xc6, 0x61, 0x8f, 0x83, 0xd6, 0x2d, 0x86, 0x9e, 0xf0, 0x04, 0xaf,
- 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0xa3, 0x30, 0xce, 0xeb, 0x05, 0x7e, 0xd3, 0xd9, 0xe0, 0x2a, 0xc1,
- 0x5e, 0x15, 0x5e, 0x67, 0x7c, 0x25, 0x01, 0xdb, 0xdf, 0x2d, 0x1d, 0x49, 0x96, 0xb1, 0x6e, 0xa7,
- 0xa8, 0x30, 0xcb, 0x3f, 0xde, 0x08, 0xbd, 0x33, 0x52, 0x06, 0x83, 0x31, 0x08, 0xeb, 0x78, 0xf6,
- 0x5f, 0x59, 0x30, 0xa1, 0x4d, 0x55, 0xd7, 0x79, 0x2a, 0x8c, 0x41, 0x2a, 0x74, 0x31, 0x48, 0x07,
- 0x8b, 0xf6, 0x90, 0x39, 0xc3, 0x3d, 0xf7, 0x69, 0x86, 0xed, 0x4f, 0x01, 0x4a, 0xe7, 0x77, 0x45,
- 0xaf, 0x73, 0x43, 0x7e, 0x37, 0x20, 0xb5, 0x76, 0x0a, 0x7f, 0x3d, 0x72, 0x8e, 0xf4, 0x5c, 0xe5,
- 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x60, 0x0f, 0x8c, 0x27, 0x63, 0x75, 0xa0, 0xcb, 0xd0, 0xc7, 0xb9,
- 0x74, 0x41, 0xbe, 0x8d, 0x3d, 0x99, 0x16, 0xe1, 0x83, 0xf1, 0x2b, 0x82, 0xd1, 0x17, 0xf5, 0xd1,
- 0x5b, 0x30, 0x54, 0xf3, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x36, 0x5b, 0x5e, 0x16, 0x27, 0x44, 0xa6,
- 0x00, 0x6a, 0x21, 0x46, 0xd3, 0xa3, 0x86, 0x30, 0xdb, 0x89, 0x18, 0x84, 0x75, 0x72, 0x68, 0x8d,
- 0xa5, 0x2c, 0x5a, 0x77, 0x37, 0x56, 0x9c, 0x66, 0x3b, 0xaf, 0xae, 0x79, 0x89, 0xa4, 0x51, 0x1e,
- 0x11, 0x79, 0x8d, 0x38, 0x00, 0xc7, 0x84, 0xd0, 0x67, 0x60, 0x32, 0xcc, 0x51, 0x89, 0xe5, 0xa5,
- 0xfb, 0x6e, 0xa7, 0x25, 0xe2, 0xc2, 0x94, 0x2c, 0xe5, 0x59, 0x56, 0x33, 0xe8, 0x0e, 0x20, 0x21,
- 0x7a, 0x5e, 0x0b, 0x5a, 0x61, 0x34, 0xd7, 0xf2, 0x6a, 0x75, 0x99, 0xd2, 0xe8, 0x83, 0xd9, 0x72,
- 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x8b, 0xdd, 0x9b, 0xc6, 0xc0, 0x19, 0x6d, 0xd8, 0x9f, 0xeb, 0x81,
- 0x69, 0x99, 0x50, 0x39, 0xc3, 0x7b, 0xe5, 0xb3, 0x56, 0xc2, 0x7d, 0xe5, 0xa5, 0xfc, 0x83, 0xfe,
- 0x81, 0x39, 0xb1, 0x7c, 0x21, 0xed, 0xc4, 0xf2, 0xca, 0x01, 0xbb, 0x71, 0xdf, 0x5c, 0x59, 0xde,
- 0xb3, 0xfe, 0x27, 0x7b, 0x47, 0xc0, 0xb8, 0x9a, 0x11, 0xe6, 0x81, 0xd1, 0xcb, 0x52, 0x75, 0x94,
- 0xf3, 0xfc, 0xbf, 0x2c, 0x70, 0x8c, 0xcb, 0x7e, 0x58, 0x86, 0x4f, 0x67, 0xe7, 0xac, 0xa2, 0x43,
- 0x69, 0x92, 0x46, 0x33, 0xda, 0x59, 0x70, 0x03, 0xd1, 0xe3, 0x4c, 0x9a, 0x8b, 0x02, 0x27, 0x4d,
- 0x53, 0x42, 0xb0, 0xa2, 0x83, 0xb6, 0x61, 0x62, 0x83, 0x45, 0x7c, 0xd2, 0x72, 0x1b, 0x8b, 0x73,
- 0x21, 0x73, 0xdf, 0x5e, 0x9a, 0x5f, 0xcc, 0x4f, 0x84, 0xcc, 0x1f, 0x7f, 0x29, 0x14, 0x9c, 0x6e,
- 0x82, 0x6e, 0x8d, 0x23, 0xce, 0xed, 0x70, 0xb1, 0xee, 0x84, 0x91, 0x5b, 0x9d, 0xab, 0xfb, 0xd5,
- 0xad, 0x4a, 0xe4, 0x07, 0x32, 0x01, 0x62, 0xe6, 0xdb, 0x6b, 0xf6, 0x66, 0x25, 0x85, 0x6f, 0x34,
- 0x3f, 0xb5, 0xb7, 0x5b, 0x3a, 0x92, 0x85, 0x85, 0x33, 0xdb, 0x42, 0xab, 0xd0, 0xbf, 0xe1, 0x46,
- 0x98, 0x34, 0x7d, 0x71, 0x5a, 0x64, 0x1e, 0x85, 0x97, 0x38, 0x8a, 0xd1, 0x12, 0x8b, 0x48, 0x25,
- 0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x4b, 0xa0, 0x2f, 0x5f, 0x00, 0x9b, 0xb6, 0xbd, 0xcb, 0xbc,
- 0x06, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0xb6, 0x8b, 0xc5, 0xb3, 0xba, 0x64, 0xc8, 0xcf, 0xe6, 0xfa,
- 0xe9, 0xd3, 0x78, 0x75, 0xa9, 0x82, 0x69, 0x45, 0xe6, 0xf6, 0x1a, 0x56, 0x43, 0x57, 0x24, 0x5c,
- 0xca, 0xf4, 0x02, 0x5e, 0xae, 0xcc, 0x57, 0x96, 0x0d, 0x1a, 0x2c, 0xaa, 0x21, 0x2b, 0xc6, 0xbc,
- 0x3a, 0xba, 0x01, 0x83, 0x1b, 0xfc, 0xe0, 0x5b, 0x0f, 0x45, 0x52, 0xf5, 0xcc, 0xcb, 0xe8, 0x92,
- 0x44, 0x32, 0xe8, 0xb1, 0x2b, 0x43, 0x81, 0x70, 0x4c, 0x0a, 0x7d, 0xce, 0x82, 0xa3, 0xc9, 0xac,
- 0xf4, 0xcc, 0x59, 0x4d, 0x98, 0xa9, 0x65, 0x3a, 0x00, 0x94, 0xb3, 0x2a, 0x18, 0x0d, 0x32, 0xf5,
- 0x4b, 0x26, 0x1a, 0xce, 0x6e, 0x8e, 0x0e, 0x74, 0x70, 0xab, 0xd6, 0x2e, 0x47, 0x4f, 0x22, 0x30,
- 0x11, 0x1f, 0x68, 0x3c, 0xb7, 0x80, 0x69, 0x45, 0xb4, 0x06, 0xb0, 0x5e, 0x27, 0x22, 0xe2, 0xa3,
- 0x30, 0x8a, 0xca, 0xbc, 0xfd, 0x97, 0x14, 0x96, 0xa0, 0xc3, 0x5e, 0xa2, 0x71, 0x29, 0xd6, 0xe8,
- 0xd0, 0xa5, 0x54, 0x75, 0xbd, 0x1a, 0x09, 0x98, 0x72, 0x2b, 0x67, 0x29, 0xcd, 0x33, 0x8c, 0xf4,
- 0x52, 0xe2, 0xe5, 0x58, 0x50, 0x60, 0xb4, 0x48, 0x73, 0x73, 0x3d, 0x6c, 0x97, 0x72, 0x62, 0x9e,
- 0x34, 0x37, 0x13, 0x0b, 0x8a, 0xd3, 0x62, 0xe5, 0x58, 0x50, 0xa0, 0x5b, 0x66, 0x9d, 0x6e, 0x20,
- 0x12, 0x4c, 0x8d, 0xe5, 0x6f, 0x99, 0x25, 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4,
- 0x49, 0x93, 0xdb, 0x19, 0x67, 0x34, 0x9f, 0xea, 0xc0, 0xed, 0x18, 0x74, 0xdb, 0xf3, 0x3b, 0x2f,
- 0x41, 0x61, 0xbd, 0xca, 0x94, 0x62, 0x39, 0x3a, 0x83, 0xa5, 0x79, 0x83, 0x1a, 0x0b, 0xe1, 0xbe,
- 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xba, 0xf4, 0x9d, 0xbb, 0xad, 0x80, 0x2c, 0xb9, 0x75, 0x22, 0xd2,
- 0x4f, 0x64, 0x2e, 0xfd, 0x59, 0x89, 0x94, 0x5e, 0xfa, 0x0a, 0x84, 0x63, 0x52, 0x94, 0x6e, 0xcc,
- 0x83, 0x4d, 0xe6, 0xd3, 0x55, 0xac, 0x56, 0x9a, 0x6e, 0x26, 0x17, 0xb6, 0x05, 0x23, 0xdb, 0x61,
- 0x73, 0x93, 0xc8, 0x53, 0x91, 0xa9, 0xeb, 0x72, 0x22, 0x55, 0xdc, 0x10, 0x88, 0x6e, 0x10, 0xb5,
- 0x9c, 0x7a, 0xea, 0x20, 0x67, 0xa2, 0x95, 0x1b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0x0b, 0xe1, 0x6d,
- 0x1e, 0x4e, 0x8e, 0x29, 0xee, 0x72, 0x16, 0x42, 0x46, 0xc4, 0x39, 0xbe, 0x10, 0x04, 0x00, 0x4b,
- 0x22, 0x6a, 0xb0, 0xd9, 0x05, 0x74, 0xac, 0xc3, 0x60, 0xa7, 0xfa, 0x1b, 0x0f, 0x36, 0xbb, 0x70,
- 0x62, 0x52, 0xec, 0xa2, 0x69, 0x66, 0x24, 0xf0, 0x67, 0x6a, 0xbb, 0x9c, 0x8b, 0xa6, 0x53, 0xc2,
- 0x7f, 0x7e, 0xd1, 0x64, 0x61, 0xe1, 0xcc, 0xb6, 0xe8, 0xc7, 0x35, 0x65, 0x64, 0x40, 0x91, 0x22,
- 0xe3, 0x89, 0x9c, 0xc0, 0x9a, 0xe9, 0xf0, 0x81, 0xfc, 0xe3, 0x14, 0x08, 0xc7, 0xa4, 0x50, 0x0d,
- 0x46, 0x9b, 0x46, 0xc4, 0x59, 0x96, 0xea, 0x23, 0x87, 0x2f, 0xc8, 0x8a, 0x4d, 0xcb, 0x25, 0x44,
- 0x26, 0x04, 0x27, 0x68, 0x32, 0xcb, 0x3d, 0xee, 0xea, 0xc7, 0x32, 0x81, 0xe4, 0x4c, 0x75, 0x86,
- 0x37, 0x20, 0x9f, 0x6a, 0x01, 0xc0, 0x92, 0x08, 0x1d, 0x0d, 0xe1, 0xa0, 0xe6, 0x87, 0x2c, 0xa1,
- 0x4e, 0x9e, 0x82, 0x3d, 0x4b, 0x4d, 0x24, 0xc3, 0xac, 0x0b, 0x10, 0x8e, 0x49, 0xd1, 0x93, 0x9c,
- 0x5e, 0x78, 0x27, 0xf3, 0x4f, 0xf2, 0xe4, 0x75, 0xc7, 0x4e, 0x72, 0x7a, 0xd9, 0x15, 0xc5, 0x55,
- 0xa7, 0xa2, 0x82, 0xb3, 0x64, 0x20, 0x39, 0xfd, 0x52, 0x61, 0xc5, 0xd3, 0xfd, 0x52, 0x20, 0x1c,
- 0x93, 0x62, 0x57, 0x31, 0x0b, 0x4d, 0x77, 0xba, 0xcd, 0x55, 0x4c, 0x11, 0x32, 0xae, 0x62, 0x2d,
- 0x74, 0x9d, 0xfd, 0x83, 0x05, 0x38, 0xdd, 0x7e, 0xdf, 0xc6, 0x3a, 0xb4, 0x72, 0x6c, 0xb3, 0x94,
- 0xd0, 0xa1, 0x71, 0x89, 0x4e, 0x8c, 0xd5, 0x75, 0xc0, 0xe1, 0x4b, 0x30, 0xa1, 0xdc, 0x11, 0xeb,
- 0x6e, 0x75, 0x47, 0x4b, 0xce, 0xa9, 0x42, 0xf3, 0x54, 0x92, 0x08, 0x38, 0x5d, 0x07, 0xcd, 0xc2,
- 0x98, 0x51, 0xb8, 0xbc, 0x20, 0x9e, 0xff, 0x71, 0x1a, 0x0b, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf,
- 0x5b, 0x70, 0x3c, 0x27, 0x0f, 0x7b, 0xd7, 0xf1, 0x74, 0xd7, 0x61, 0xac, 0x69, 0x56, 0xed, 0x10,
- 0x02, 0xdc, 0xc8, 0xf6, 0xae, 0xfa, 0x9a, 0x00, 0xe0, 0x24, 0x51, 0xfb, 0x67, 0x0b, 0x70, 0xaa,
- 0xad, 0x7d, 0x3d, 0xc2, 0x70, 0x6c, 0xa3, 0x11, 0x3a, 0xf3, 0x01, 0xa9, 0x11, 0x2f, 0x72, 0x9d,
- 0x7a, 0xa5, 0x49, 0xaa, 0x9a, 0x16, 0x94, 0x19, 0xaa, 0x5f, 0x5a, 0xa9, 0xcc, 0xa6, 0x31, 0x70,
- 0x4e, 0x4d, 0xb4, 0x04, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x9e, 0xb8, 0x69, 0x7a, 0x38, 0xa3, 0x06,
- 0x7a, 0x11, 0x46, 0x94, 0xdd, 0xbe, 0x36, 0xe3, 0xec, 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8,
- 0x02, 0xcf, 0x6f, 0x24, 0x32, 0x61, 0x09, 0x95, 0xe9, 0x98, 0x4c, 0x5e, 0x24, 0x8a, 0xb1, 0x8e,
- 0x33, 0x77, 0xf1, 0x77, 0xbe, 0x79, 0xfa, 0x03, 0xbf, 0xff, 0xcd, 0xd3, 0x1f, 0xf8, 0xc3, 0x6f,
- 0x9e, 0xfe, 0xc0, 0xf7, 0xec, 0x9d, 0xb6, 0x7e, 0x67, 0xef, 0xb4, 0xf5, 0xfb, 0x7b, 0xa7, 0xad,
- 0x3f, 0xdc, 0x3b, 0x6d, 0xfd, 0xd7, 0xbd, 0xd3, 0xd6, 0x17, 0xff, 0xe4, 0xf4, 0x07, 0xde, 0x44,
- 0x71, 0x84, 0xea, 0xf3, 0x74, 0x76, 0xce, 0x6f, 0x5f, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0x67, 0xd5, 0x38, 0x2d, 0xc3, 0x23, 0x01, 0x00,
+ // 16114 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9,
+ 0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee,
+ 0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74,
+ 0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32,
+ 0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08,
+ 0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e,
+ 0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82,
+ 0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd,
+ 0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0,
+ 0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c,
+ 0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38,
+ 0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6,
+ 0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb,
+ 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24,
+ 0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e,
+ 0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17,
+ 0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 0xaa, 0xf7, 0x42,
+ 0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b,
+ 0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89,
+ 0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb,
+ 0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb,
+ 0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26,
+ 0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18,
+ 0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04,
+ 0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e,
+ 0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18,
+ 0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67,
+ 0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7,
+ 0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 0x16, 0x18, 0xd9,
+ 0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04,
+ 0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43,
+ 0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd,
+ 0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22,
+ 0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40,
+ 0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02,
+ 0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18,
+ 0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2,
+ 0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b,
+ 0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25,
+ 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77,
+ 0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a,
+ 0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b,
+ 0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15,
+ 0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa,
+ 0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29,
+ 0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9,
+ 0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60,
+ 0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c,
+ 0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b,
+ 0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b,
+ 0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d,
+ 0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49,
+ 0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7,
+ 0x9c, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9,
+ 0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d,
+ 0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf,
+ 0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57,
+ 0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f,
+ 0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91,
+ 0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38,
+ 0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56,
+ 0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa,
+ 0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3,
+ 0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d,
+ 0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc,
+ 0x6c, 0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56,
+ 0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9,
+ 0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7,
+ 0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6,
+ 0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f,
+ 0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab,
+ 0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc,
+ 0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2,
+ 0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4,
+ 0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6,
+ 0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57,
+ 0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71,
+ 0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b,
+ 0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9,
+ 0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46,
+ 0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43,
+ 0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e,
+ 0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15,
+ 0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf,
+ 0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f,
+ 0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d,
+ 0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f,
+ 0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62,
+ 0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48,
+ 0x70, 0x02, 0x14, 0x82, 0x35, 0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11,
+ 0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40,
+ 0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff,
+ 0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31,
+ 0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd,
+ 0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45,
+ 0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2,
+ 0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd,
+ 0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b,
+ 0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c,
+ 0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61,
+ 0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4,
+ 0x4e, 0xaa, 0x91, 0x1f, 0x08, 0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26,
+ 0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea,
+ 0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b,
+ 0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31,
+ 0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46,
+ 0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54,
+ 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18,
+ 0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1,
+ 0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f,
+ 0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3,
+ 0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58,
+ 0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73,
+ 0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc,
+ 0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f,
+ 0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d,
+ 0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19,
+ 0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70,
+ 0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5,
+ 0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06,
+ 0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12,
+ 0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47,
+ 0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c,
+ 0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d,
+ 0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8,
+ 0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b,
+ 0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39,
+ 0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18,
+ 0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f,
+ 0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58,
+ 0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99,
+ 0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d,
+ 0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b,
+ 0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7,
+ 0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37,
+ 0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76,
+ 0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43,
+ 0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75,
+ 0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca,
+ 0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6,
+ 0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2,
+ 0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c,
+ 0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1,
+ 0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f,
+ 0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2,
+ 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c,
+ 0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa,
+ 0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99,
+ 0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6,
+ 0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29,
+ 0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab,
+ 0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52,
+ 0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca,
+ 0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca,
+ 0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25,
+ 0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4,
+ 0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c,
+ 0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12,
+ 0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75,
+ 0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e,
+ 0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14,
+ 0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 0xbb, 0xd5, 0x3a,
+ 0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab,
+ 0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c,
+ 0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85,
+ 0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83,
+ 0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70,
+ 0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59,
+ 0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58,
+ 0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5,
+ 0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96,
+ 0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09,
+ 0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb,
+ 0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 0xd3, 0x41, 0xda,
+ 0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88,
+ 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc,
+ 0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2,
+ 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75,
+ 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19,
+ 0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5,
+ 0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf,
+ 0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d,
+ 0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9,
+ 0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49,
+ 0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb,
+ 0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4,
+ 0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4,
+ 0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2,
+ 0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75,
+ 0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf,
+ 0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee,
+ 0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d,
+ 0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71,
+ 0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52,
+ 0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31,
+ 0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4,
+ 0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6,
+ 0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed,
+ 0x9f, 0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44,
+ 0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1,
+ 0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed,
+ 0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c,
+ 0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3,
+ 0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0,
+ 0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd,
+ 0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c,
+ 0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5,
+ 0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44,
+ 0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09,
+ 0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0,
+ 0x0c, 0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e,
+ 0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0,
+ 0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5,
+ 0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65,
+ 0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67,
+ 0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80,
+ 0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5,
+ 0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa,
+ 0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a,
+ 0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f,
+ 0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64,
+ 0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82,
+ 0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a,
+ 0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32,
+ 0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f,
+ 0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a,
+ 0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3,
+ 0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07,
+ 0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c,
+ 0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e,
+ 0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd,
+ 0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05,
+ 0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09,
+ 0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83,
+ 0xb8, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1,
+ 0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca,
+ 0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac,
+ 0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43,
+ 0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69,
+ 0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49,
+ 0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69,
+ 0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14,
+ 0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1,
+ 0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60,
+ 0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a,
+ 0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e,
+ 0x51, 0x18, 0xaa, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4,
+ 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c,
+ 0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7,
+ 0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa,
+ 0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d,
+ 0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55,
+ 0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8,
+ 0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20,
+ 0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4,
+ 0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37,
+ 0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d,
+ 0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4,
+ 0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68,
+ 0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05,
+ 0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07,
+ 0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff,
+ 0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d,
+ 0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28,
+ 0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8,
+ 0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c,
+ 0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e,
+ 0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d,
+ 0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d,
+ 0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46,
+ 0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0,
+ 0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1,
+ 0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09,
+ 0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b,
+ 0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99,
+ 0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb,
+ 0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee,
+ 0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28,
+ 0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66,
+ 0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63,
+ 0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02,
+ 0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21,
+ 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90,
+ 0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88,
+ 0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35,
+ 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30,
+ 0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8,
+ 0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7,
+ 0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79,
+ 0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5,
+ 0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18,
+ 0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72,
+ 0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e,
+ 0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8,
+ 0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31,
+ 0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88,
+ 0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39,
+ 0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3,
+ 0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5,
+ 0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31,
+ 0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58,
+ 0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a,
+ 0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e,
+ 0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5,
+ 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc,
+ 0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab,
+ 0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 0x63, 0x1c, 0xfb,
+ 0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9,
+ 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa,
+ 0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05,
+ 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f,
+ 0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda,
+ 0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11,
+ 0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01,
+ 0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae,
+ 0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a,
+ 0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b,
+ 0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee,
+ 0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 0x88, 0x0f, 0x76,
+ 0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d,
+ 0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7,
+ 0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8,
+ 0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab,
+ 0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5,
+ 0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c,
+ 0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda,
+ 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e,
+ 0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e,
+ 0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e,
+ 0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07,
+ 0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5,
+ 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91,
+ 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9,
+ 0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a,
+ 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb,
+ 0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00,
+ 0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30,
+ 0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5,
+ 0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74,
+ 0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15,
+ 0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb,
+ 0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc,
+ 0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85,
+ 0xae, 0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2,
+ 0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22,
+ 0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06,
+ 0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93,
+ 0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47,
+ 0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4,
+ 0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69,
+ 0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c,
+ 0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e,
+ 0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5,
+ 0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1,
+ 0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b,
+ 0x17, 0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f,
+ 0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2,
+ 0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce,
+ 0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55,
+ 0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60,
+ 0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18,
+ 0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70,
+ 0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79,
+ 0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8,
+ 0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43,
+ 0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4,
+ 0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57,
+ 0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e,
+ 0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8,
+ 0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3,
+ 0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98,
+ 0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed,
+ 0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72,
+ 0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f,
+ 0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07,
+ 0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9,
+ 0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09,
+ 0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d,
+ 0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71,
+ 0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e,
+ 0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76,
+ 0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf,
+ 0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82,
+ 0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3,
+ 0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53,
+ 0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72,
+ 0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd,
+ 0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f,
+ 0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f,
+ 0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d,
+ 0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82,
+ 0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13,
+ 0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39,
+ 0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde,
+ 0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b,
+ 0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05,
+ 0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85,
+ 0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8,
+ 0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45,
+ 0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6,
+ 0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d,
+ 0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47,
+ 0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5,
+ 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c,
+ 0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce,
+ 0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc,
+ 0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b,
+ 0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41,
+ 0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2,
+ 0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab,
+ 0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4,
+ 0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee,
+ 0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79,
+ 0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf,
+ 0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97,
+ 0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3,
+ 0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30,
+ 0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc,
+ 0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16,
+ 0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43,
+ 0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0,
+ 0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10,
+ 0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38,
+ 0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11,
+ 0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c,
+ 0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d,
+ 0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6,
+ 0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb,
+ 0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8,
+ 0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4,
+ 0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1,
+ 0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9,
+ 0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e,
+ 0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68,
+ 0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7,
+ 0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0,
+ 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a,
+ 0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78,
+ 0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3,
+ 0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96,
+ 0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31,
+ 0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59,
+ 0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4,
+ 0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7,
+ 0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8,
+ 0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a,
+ 0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c,
+ 0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76,
+ 0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1,
+ 0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9,
+ 0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12,
+ 0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61,
+ 0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79,
+ 0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b,
+ 0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7,
+ 0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5,
+ 0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69,
+ 0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1,
+ 0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4,
+ 0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe,
+ 0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79,
+ 0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44,
+ 0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9,
+ 0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f,
+ 0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29,
+ 0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78,
+ 0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28,
+ 0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb,
+ 0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c,
+ 0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16,
+ 0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30,
+ 0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d,
+ 0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74,
+ 0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2,
+ 0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff,
+ 0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39,
+ 0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47,
+ 0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77,
+ 0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1,
+ 0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb,
+ 0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae,
+ 0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe,
+ 0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6,
+ 0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0,
+ 0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b,
+ 0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6,
+ 0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54,
+ 0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac,
+ 0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55,
+ 0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62,
+ 0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b,
+ 0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3,
+ 0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49,
+ 0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4,
+ 0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b,
+ 0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82,
+ 0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82,
+ 0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f,
+ 0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50,
+ 0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1,
+ 0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5,
+ 0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1,
+ 0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f,
+ 0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03,
+ 0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39,
+ 0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01,
+ 0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c,
+ 0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb,
+ 0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58,
+ 0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6,
+ 0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90,
+ 0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3,
+ 0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d,
+ 0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04,
+ 0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd,
+ 0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53,
+ 0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a,
+ 0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc,
+ 0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09,
+ 0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75,
+ 0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d,
+ 0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50,
+ 0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44,
+ 0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce,
+ 0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6,
+ 0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05,
+ 0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb,
+ 0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd,
+ 0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92,
+ 0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a,
+ 0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a,
+ 0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64,
+ 0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83,
+ 0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe,
+ 0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f,
+ 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3,
+ 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f,
+ 0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb,
+ 0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06,
+ 0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9,
+ 0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89,
+ 0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f,
+ 0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9,
+ 0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4,
+ 0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a,
+ 0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f,
+ 0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03,
+ 0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03,
+ 0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03,
+ 0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7,
+ 0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41,
+ 0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6,
+ 0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a,
+ 0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9,
+ 0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b,
+ 0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8,
+ 0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7,
+ 0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6,
+ 0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77,
+ 0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86,
+ 0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa,
+ 0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd,
+ 0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93,
+ 0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4,
+ 0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a,
+ 0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4,
+ 0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a,
+ 0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54,
+ 0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e,
+ 0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5,
+ 0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30,
+ 0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb,
+ 0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e,
+ 0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63,
+ 0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20,
+ 0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c,
+ 0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb,
+ 0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0,
+ 0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85,
+ 0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60,
+ 0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9,
+ 0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c,
+ 0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f,
+ 0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad,
+ 0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf,
+ 0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53,
+ 0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d,
+ 0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39,
+ 0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f,
+ 0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5,
+ 0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8,
+ 0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a,
+ 0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b,
+ 0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4,
+ 0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba,
+ 0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14,
+ 0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36,
+ 0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b,
+ 0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05,
+ 0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d,
+ 0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1,
+ 0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23,
+ 0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15,
+ 0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d,
+ 0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d,
+ 0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4,
+ 0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70,
+ 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7,
+ 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39,
+ 0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22,
+ 0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e,
+ 0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09,
+ 0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82,
+ 0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1,
+ 0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30,
+ 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56,
+ 0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c,
+ 0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40,
+ 0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37,
+ 0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6,
+ 0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6,
+ 0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8,
+ 0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae,
+ 0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d,
+ 0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10,
+ 0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18,
+ 0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3,
+ 0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb,
+ 0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c,
+ 0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc,
+ 0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e,
+ 0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62,
+ 0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98,
+ 0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d,
+ 0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47,
+ 0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76,
+ 0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8,
+ 0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75,
+ 0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93,
+ 0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a,
+ 0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22,
+ 0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1,
+ 0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1,
+ 0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9,
+ 0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea,
+ 0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d,
+ 0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40,
+ 0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c,
+ 0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5,
+ 0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27,
+ 0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a,
+ 0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f,
+ 0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3,
+ 0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c,
+ 0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60,
+ 0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c,
+ 0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f,
+ 0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9,
+ 0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9,
+ 0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad,
+ 0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc,
+ 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a,
+ 0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75,
+ 0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0,
+ 0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35,
+ 0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43,
+ 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62,
+ 0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d,
+ 0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40,
+ 0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed,
+ 0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8,
+ 0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24,
+ 0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49,
+ 0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53,
+ 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f,
+ 0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49,
+ 0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37,
+ 0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea,
+ 0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c,
+ 0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a,
+ 0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe,
+ 0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d,
+ 0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6,
+ 0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0,
+ 0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19,
+ 0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e,
+ 0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d,
+ 0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c,
+ 0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92,
+ 0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51,
+ 0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7,
+ 0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4,
+ 0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a,
+ 0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09,
+ 0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b,
+ 0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66,
+ 0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a,
+ 0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d,
+ 0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01,
+ 0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc,
+ 0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39,
+ 0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74,
+ 0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd,
+ 0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7,
+ 0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47,
+ 0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea,
+ 0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71,
+ 0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d,
+ 0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62,
+ 0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b,
+ 0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e,
+ 0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d,
+ 0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad,
+ 0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2,
+ 0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a,
+ 0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35,
+ 0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e,
+ 0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3,
+ 0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb,
+ 0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc,
+ 0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a,
+ 0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69,
+ 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef,
+ 0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd,
+ 0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02,
+ 0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50,
+ 0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa,
+ 0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87,
+ 0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e,
+ 0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c,
+ 0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29,
+ 0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57,
+ 0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc,
+ 0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b,
+ 0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44,
+ 0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0,
+ 0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f,
+ 0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d,
+ 0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b,
+ 0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05,
+ 0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d,
+ 0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11,
+ 0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d,
+ 0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01,
+ 0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13,
+ 0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69,
+ 0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38,
+ 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1,
+ 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78,
+ 0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f,
+ 0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47,
+ 0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84,
+ 0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8,
+ 0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6,
+ 0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47,
+ 0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5,
+ 0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd,
+ 0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f,
+ 0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74,
+ 0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2,
+ 0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b,
+ 0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26,
+ 0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28,
+ 0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec,
+ 0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7,
+ 0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7,
+ 0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76,
+ 0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08,
+ 0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad,
+ 0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55,
+ 0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82,
+ 0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a,
+ 0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51,
+ 0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda,
+ 0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73,
+ 0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7,
+ 0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27,
+ 0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5,
+ 0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e,
+ 0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f,
+ 0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35,
+ 0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59,
+ 0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84,
+ 0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e,
+ 0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70,
+ 0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71,
+ 0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68,
+ 0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26,
+ 0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a,
+ 0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90,
+ 0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46,
+ 0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6,
+ 0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67,
+ 0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27,
+ 0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03,
+ 0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10,
+ 0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9,
+ 0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93,
+ 0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40,
+ 0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b,
+ 0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83,
+ 0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75,
+ 0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67,
+ 0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73,
+ 0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb,
+ 0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d,
+ 0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf,
+ 0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae,
+ 0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c,
+ 0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71,
+ 0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc,
+ 0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02,
+ 0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83,
+ 0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a,
+ 0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba,
+ 0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a,
+ 0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c,
+ 0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc,
+ 0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b,
+ 0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2,
+ 0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3,
+ 0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf,
+ 0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d,
+ 0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40,
+ 0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71,
+ 0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64,
+ 0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72,
+ 0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f,
+ 0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58,
+ 0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43,
+ 0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8,
+ 0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66,
+ 0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58,
+ 0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a,
+ 0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2,
+ 0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33,
+ 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0,
+ 0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1,
+ 0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60,
+ 0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c,
+ 0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e,
+ 0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e,
+ 0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08,
+ 0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42,
+ 0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03,
+ 0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6,
+ 0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1,
+ 0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb,
+ 0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32,
+ 0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3,
+ 0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc,
+ 0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e,
+ 0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda,
+ 0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6,
+ 0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d,
+ 0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09,
+ 0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6,
+ 0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58,
+ 0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c,
+ 0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd,
+ 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a,
+ 0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a,
+ 0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15,
+ 0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e,
+ 0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3,
+ 0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a,
+ 0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23,
+ 0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf,
+ 0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11,
+ 0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc,
+ 0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98,
+ 0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f,
+ 0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3,
+ 0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8,
+ 0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb,
+ 0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98,
+ 0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21,
+ 0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce,
+ 0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46,
+ 0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75,
+ 0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44,
+ 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5,
+ 0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39,
+ 0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb,
+ 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9,
+ 0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9,
+ 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a,
+ 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07,
+ 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9,
+ 0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5,
+ 0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5,
+ 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29,
+ 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff,
+ 0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5,
+ 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d,
+ 0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda,
+ 0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4,
+ 0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99,
+ 0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c,
+ 0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84,
+ 0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e,
+ 0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34,
+ 0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46,
+ 0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6,
+ 0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c,
+ 0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c,
+ 0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a,
+ 0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6,
+ 0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe,
+ 0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7,
+ 0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4,
+ 0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5,
+ 0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b,
+ 0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc,
+ 0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3,
+ 0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6,
+ 0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb,
+ 0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d,
+ 0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27,
+ 0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac,
+ 0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8,
+ 0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94,
+ 0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59,
+ 0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae,
+ 0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf,
+ 0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b,
+ 0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f,
+ 0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf,
+ 0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71,
+ 0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e,
+ 0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2,
+ 0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3,
+ 0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3,
+ 0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e,
+ 0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c,
+ 0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e,
+ 0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a,
+ 0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d,
+ 0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3,
+ 0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8,
+ 0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38,
+ 0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61,
+ 0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd,
+ 0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1,
+ 0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59,
+ 0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5,
+ 0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38,
+ 0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7,
+ 0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5,
+ 0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a,
+ 0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb,
+ 0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d,
+ 0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89,
+ 0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f,
+ 0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5,
+ 0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99,
+ 0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16,
+ 0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14,
+ 0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81,
+ 0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e,
+ 0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15,
+ 0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59,
+ 0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f,
+ 0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2,
+ 0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9,
+ 0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0,
+ 0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15,
+ 0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58,
+ 0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60,
+ 0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22,
+ 0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95,
+ 0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38,
+ 0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6,
+ 0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8,
+ 0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77,
+ 0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a,
+ 0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69,
+ 0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb,
+ 0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e,
+ 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08,
+ 0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e,
+ 0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47,
+ 0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85,
+ 0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6,
+ 0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59,
+ 0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c,
+ 0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00,
+ 0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35,
+ 0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9,
+ 0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c,
+ 0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34,
+ 0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0,
+ 0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31,
+ 0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8,
+ 0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78,
+ 0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3,
+ 0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea,
+ 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89,
+ 0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a,
+ 0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34,
+ 0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb,
+ 0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0,
+ 0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6,
+ 0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f,
+ 0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff,
+ 0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3,
+ 0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24,
+ 0x01, 0x00,
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -16016,6 +16020,13 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Stream != nil {
+ i -= len(*m.Stream)
+ copy(dAtA[i:], *m.Stream)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Stream)))
+ i--
+ dAtA[i] = 0x52
+ }
i--
if m.InsecureSkipTLSVerifyBackend {
dAtA[i] = 1
@@ -16322,6 +16333,13 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.SELinuxChangePolicy != nil {
+ i -= len(*m.SELinuxChangePolicy)
+ copy(dAtA[i:], *m.SELinuxChangePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SELinuxChangePolicy)))
+ i--
+ dAtA[i] = 0x6a
+ }
if m.SupplementalGroupsPolicy != nil {
i -= len(*m.SupplementalGroupsPolicy)
copy(dAtA[i:], *m.SupplementalGroupsPolicy)
@@ -16488,6 +16506,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Resources != nil {
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc2
+ }
if len(m.ResourceClaims) > 0 {
for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -24737,6 +24769,10 @@ func (m *PodLogOptions) Size() (n int) {
n += 1 + sovGenerated(uint64(*m.LimitBytes))
}
n += 2
+ if m.Stream != nil {
+ l = len(*m.Stream)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -24885,6 +24921,10 @@ func (m *PodSecurityContext) Size() (n int) {
l = len(*m.SupplementalGroupsPolicy)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.SELinuxChangePolicy != nil {
+ l = len(*m.SELinuxChangePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -25059,6 +25099,10 @@ func (m *PodSpec) Size() (n int) {
n += 2 + l + sovGenerated(uint64(l))
}
}
+ if m.Resources != nil {
+ l = m.Resources.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -29088,6 +29132,7 @@ func (this *PodLogOptions) String() string {
`TailLines:` + valueToStringGenerated(this.TailLines) + `,`,
`LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`,
`InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`,
+ `Stream:` + valueToStringGenerated(this.Stream) + `,`,
`}`,
}, "")
return s
@@ -29187,6 +29232,7 @@ func (this *PodSecurityContext) String() string {
`SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`,
`AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`,
`SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`,
+ `SELinuxChangePolicy:` + valueToStringGenerated(this.SELinuxChangePolicy) + `,`,
`}`,
}, "")
return s
@@ -29320,6 +29366,7 @@ func (this *PodSpec) String() string {
`HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`,
`SchedulingGates:` + repeatedStringForSchedulingGates + `,`,
`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
+ `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
`}`,
}, "")
return s
@@ -56954,6 +57001,39 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error {
}
}
m.InsecureSkipTLSVerifyBackend = bool(v != 0)
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Stream = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -58122,6 +58202,39 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex])
m.SupplementalGroupsPolicy = &s
iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SELinuxChangePolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := PodSELinuxChangePolicy(dAtA[iNdEx:postIndex])
+ m.SELinuxChangePolicy = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -59611,6 +59724,42 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 40:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Resources == nil {
+ m.Resources = &ResourceRequirements{}
+ }
+ if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
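
The generated marshalling hunks above hard-code bytes such as `0x52`, `0x6a` and `0xc2 0x02` in front of the new fields. Those are simply protobuf field keys, `(field_number << 3) | wire_type`, written back-to-front because `MarshalToSizedBuffer` fills the buffer from the end. A small self-contained sketch (not part of the vendored code) that reproduces the constants:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// tag returns the protobuf field key: (fieldNumber << 3) | wireType.
func tag(fieldNumber, wireType int) uint64 {
	return uint64(fieldNumber)<<3 | uint64(wireType)
}

func main() {
	const lengthDelimited = 2 // wire type for strings and embedded messages

	fields := []struct {
		name  string
		field int
	}{
		{"PodLogOptions.Stream", 10},                   // written as 0x52 above
		{"PodSecurityContext.SELinuxChangePolicy", 13}, // written as 0x6a above
		{"PodSpec.Resources", 40},                      // written as 0xc2 0x02 above (reversed in the buffer)
	}
	for _, f := range fields {
		key := tag(f.field, lengthDelimited)
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, key) // varint bytes in wire order
		fmt.Printf("%-42s key=%#x varint=% x\n", f.name, key, buf[:n])
	}
}
```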
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 68ac80ed0..08706987c 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -181,7 +181,6 @@ message AzureFileVolumeSource {
}
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
-// Deprecated in 1.7, please use the bindings subresource of pods instead.
message Binding {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@@ -192,7 +191,7 @@ message Binding {
optional ObjectReference target = 2;
}
-// Represents storage that is managed by an external CSI volume driver (Beta feature)
+// Represents storage that is managed by an external CSI volume driver
message CSIPersistentVolumeSource {
// driver is the name of the driver to use for this volume.
// Required.
@@ -1071,7 +1070,7 @@ message ContainerStatus {
// AllocatedResources represents the compute resources allocated for this container by the
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
// and after successfully admitting desired pod resize.
- // +featureGate=InPlacePodVerticalScaling
+ // +featureGate=InPlacePodVerticalScalingAllocatedStatus
// +optional
map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 10;
@@ -1870,6 +1869,7 @@ message GCEPersistentDiskVolumeSource {
optional bool readOnly = 4;
}
+// GRPCAction specifies an action involving a GRPC service.
message GRPCAction {
// Port number of the gRPC service. Number must be in the range 1 to 65535.
optional int32 port = 1;
@@ -2203,21 +2203,21 @@ message Lifecycle {
// LifecycleHandler defines a specific action that should be taken in a lifecycle
// hook. One and only one of the fields, except TCPSocket must be specified.
message LifecycleHandler {
- // Exec specifies the action to take.
+ // Exec specifies a command to execute in the container.
// +optional
optional ExecAction exec = 1;
- // HTTPGet specifies the http request to perform.
+ // HTTPGet specifies an HTTP GET request to perform.
// +optional
optional HTTPGetAction httpGet = 2;
// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- // for the backward compatibility. There are no validation of this field and
- // lifecycle hooks will fail in runtime when tcp handler is specified.
+ // for backward compatibility. There is no validation of this field and
+ // lifecycle hooks will fail at runtime when it is specified.
// +optional
optional TCPSocketAction tcpSocket = 3;
- // Sleep represents the duration that the container should sleep before being terminated.
+ // Sleep represents a duration that the container should sleep.
// +featureGate=PodLifecycleSleepAction
// +optional
optional SleepAction sleep = 4;
@@ -2346,13 +2346,23 @@ message LoadBalancerStatus {
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +structType=atomic
message LocalObjectReference {
// Name of the referent.
// This field is effectively required, but due to backwards compatibility is
// allowed to be empty. Instances of this type with an empty value here are
// almost certainly wrong.
- // TODO: Add other useful fields. apiVersion, kind, uid?
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
// +default=""
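
The guidance above suggests defining a purpose-built reference type instead of embedding LocalObjectReference. A hypothetical sketch of what such a focused type could look like (the type and fields are illustrative, not part of the vendored API):

```go
// ConfigMapKeyRef is a hypothetical, narrowly-scoped reference of the kind the
// comment above recommends: it can only name a ConfigMap key in the same
// namespace, so its documentation and validation can be specific.
type ConfigMapKeyRef struct {
	// Name of the ConfigMap in the same namespace. Required.
	Name string `json:"name"`
	// Key within the ConfigMap. Required.
	Key string `json:"key"`
}
```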
@@ -2361,7 +2371,7 @@ message LocalObjectReference {
optional string name = 1;
}
-// Local represents directly-attached storage with node affinity (Beta feature)
+// Local represents directly-attached storage with node affinity
message LocalVolumeSource {
// path of the full path to the volume on the node.
// It can be either a directory or block device (disk, partition, ...).
@@ -2438,12 +2448,15 @@ message NamespaceCondition {
// Status of the condition, one of True, False, Unknown.
optional string status = 2;
+ // Last time the condition transitioned from one status to another.
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+ // Unique, one-word, CamelCase reason for the condition's last transition.
// +optional
optional string reason = 5;
+ // Human-readable message indicating details about last transition.
// +optional
optional string message = 6;
}
@@ -2783,7 +2796,7 @@ message NodeStatus {
optional string phase = 3;
// Conditions is an array of current observed node conditions.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#condition
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
@@ -2793,7 +2806,7 @@ message NodeStatus {
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
// Note: This field is declared as mergeable, but the merge key is not sufficiently
// unique, which can cause data corruption when it is merged. Callers should instead
// use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
@@ -2813,7 +2826,7 @@ message NodeStatus {
optional NodeDaemonEndpoints daemonEndpoints = 6;
// Set of ids/uuids to uniquely identify the node.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#info
// +optional
optional NodeSystemInfo nodeInfo = 7;
@@ -3001,8 +3014,13 @@ message PersistentVolumeClaim {
// PersistentVolumeClaimCondition contains details about state of pvc
message PersistentVolumeClaimCondition {
+ // Type is the type of the condition.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
optional string type = 1;
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
optional string status = 2;
// lastProbeTime is the time we probed the condition.
@@ -3280,12 +3298,16 @@ message PersistentVolumeList {
message PersistentVolumeSource {
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
@@ -3300,6 +3322,7 @@ message PersistentVolumeSource {
// glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsPersistentVolumeSource glusterfs = 4;
@@ -3310,6 +3333,7 @@ message PersistentVolumeSource {
optional NFSVolumeSource nfs = 5;
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDPersistentVolumeSource rbd = 6;
@@ -3320,11 +3344,14 @@ message PersistentVolumeSource {
optional ISCSIPersistentVolumeSource iscsi = 7;
// cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
optional CinderPersistentVolumeSource cinder = 8;
- // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
optional CephFSPersistentVolumeSource cephfs = 9;
@@ -3332,39 +3359,53 @@ message PersistentVolumeSource {
// +optional
optional FCVolumeSource fc = 10;
- // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
+ // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
optional FlockerVolumeSource flocker = 11;
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
optional FlexPersistentVolumeSource flexVolume = 12;
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
// +optional
optional AzureFilePersistentVolumeSource azureFile = 13;
- // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
- // quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
optional QuobyteVolumeSource quobyte = 15;
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
// +optional
optional AzureDiskVolumeSource azureDisk = 16;
- // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
- // portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
// +optional
optional PortworxVolumeSource portworxVolume = 18;
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
optional ScaleIOPersistentVolumeSource scaleIO = 19;
@@ -3372,12 +3413,13 @@ message PersistentVolumeSource {
// +optional
optional LocalVolumeSource local = 20;
- // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// More info: https://examples.k8s.io/volumes/storageos/README.md
// +optional
optional StorageOSPersistentVolumeSource storageos = 21;
- // csi represents storage that is handled by an external CSI driver (Beta feature).
+ // csi represents storage that is handled by an external CSI driver.
// +optional
optional CSIPersistentVolumeSource csi = 22;
}
@@ -3710,9 +3752,11 @@ message PodDNSConfig {
// PodDNSConfigOption defines DNS resolver options of a pod.
message PodDNSConfigOption {
+ // Name is this DNS resolver option's name.
// Required.
optional string name = 1;
+ // Value is this DNS resolver option's value.
// +optional
optional string value = 2;
}
@@ -3803,7 +3847,8 @@ message PodLogOptions {
optional bool timestamps = 6;
// If set, the number of lines from the end of the logs to show. If not specified,
- // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime.
+ // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
// +optional
optional int64 tailLines = 7;
@@ -3821,6 +3866,14 @@ message PodLogOptions {
// the actual log data coming from the real kubelet).
// +optional
optional bool insecureSkipTLSVerifyBackend = 9;
+
+ // Specify which container log stream to return to the client.
+ // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
+ // are returned interleaved.
+ // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+ // +featureGate=PodLogsQuerySplitStreams
+ // +optional
+ optional string stream = 10;
}
// PodOS defines the OS parameters of a pod.
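
A minimal client-side sketch of the new `stream` field, assuming the `k8s.io/api/core/v1` package vendored in this bump; the server additionally needs the PodLogsQuerySplitStreams feature gate enabled:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Ask for only the stderr stream of a container's logs. Per the field
	// comment above, valid values are "All", "Stdout" and "Stderr", and
	// TailLines may only be combined with nil or "All".
	stream := "Stderr"
	opts := &corev1.PodLogOptions{
		Container: "main", // illustrative container name
		Stream:    &stream,
	}
	fmt.Printf("%+v\n", opts)
	// opts would typically be passed to client-go, e.g.
	// clientset.CoreV1().Pods(namespace).GetLogs(podName, opts).
}
```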
@@ -4029,6 +4082,33 @@ message PodSecurityContext {
// Note that this field cannot be set when spec.os.name is windows.
// +optional
optional AppArmorProfile appArmorProfile = 11;
+
+ // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ // It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ // Valid values are "MountOption" and "Recursive".
+ //
+ // "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+ //
+ // "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ // This requires all Pods that share the same volume to use the same SELinux label.
+ // It is not possible to share the same volume among privileged and unprivileged Pods.
+ // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ // CSIDriver instance. Other volumes are always re-labelled recursively.
+ // "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+ //
+ // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ // and "Recursive" for all other volumes.
+ //
+ // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+ //
+ // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ // Note that this field cannot be set when spec.os.name is windows.
+ // +featureGate=SELinuxChangePolicy
+ // +optional
+ optional string seLinuxChangePolicy = 13;
}
// Describes the class of pods that should avoid this node.
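
A minimal sketch of setting the new field from Go, assuming the vendored `k8s.io/api/core/v1` types (the SELinuxChangePolicy feature gate, and SELinuxMount for "MountOption", must be enabled server-side):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// "Recursive" or "MountOption", per the field comment above.
	policy := corev1.PodSELinuxChangePolicy("Recursive")
	spec := corev1.PodSpec{
		SecurityContext: &corev1.PodSecurityContext{
			// The policy only matters for Pods that have an SELinux label set.
			SELinuxOptions:      &corev1.SELinuxOptions{Level: "s0:c123,c456"},
			SELinuxChangePolicy: &policy,
		},
	}
	fmt.Printf("%+v\n", spec.SecurityContext)
}
```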
@@ -4386,6 +4466,21 @@ message PodSpec {
// +featureGate=DynamicResourceAllocation
// +optional
repeated PodResourceClaim resourceClaims = 39;
+
+ // Resources is the total amount of CPU and Memory resources required by all
+ // containers in the pod. It supports specifying Requests and Limits for
+ // "cpu" and "memory" resource names only. ResourceClaims are not supported.
+ //
+ // This field enables fine-grained control over resource allocation for the
+ // entire pod, allowing resource sharing among containers in a pod.
+ // TODO: For beta graduation, expand this comment with a detailed explanation.
+ //
+ // This is an alpha field and requires enabling the PodLevelResources feature
+ // gate.
+ //
+ // +featureGate=PodLevelResources
+ // +optional
+ optional ResourceRequirements resources = 40;
}
// PodStatus represents information about the status of a pod. Status may trail the actual
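
A minimal sketch of the pod-level resources field from Go, assuming the vendored `k8s.io/api/core/v1` and `k8s.io/apimachinery` packages (the PodLevelResources feature gate must be enabled for the API server to accept it):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Pod-level resources (alpha): only "cpu" and "memory" are supported here,
	// per the field comment above; ResourceClaims are not.
	spec := corev1.PodSpec{
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}
	fmt.Printf("%+v\n", spec.Resources)
}
```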
@@ -4477,14 +4572,26 @@ message PodStatus {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
- // The list has one entry per init container in the manifest. The most recent successful
+ // Statuses of init containers in this pod. The most recent successful non-restartable
// init container will have ready = true, the most recently started container will have
// startTime set.
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+ // Each init container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
// +listType=atomic
repeated ContainerStatus initContainerStatuses = 10;
- // The list has one entry per container in the manifest.
+ // Statuses of containers in this pod.
+ // Each container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
@@ -4496,7 +4603,14 @@ message PodStatus {
// +optional
optional string qosClass = 9;
- // Status for any ephemeral containers that have run in this pod.
+ // Statuses for any ephemeral containers that have run in this pod.
+ // Each ephemeral container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
repeated ContainerStatus ephemeralContainerStatuses = 13;
@@ -4571,6 +4685,7 @@ message PodTemplateSpec {
optional PodSpec spec = 2;
}
+// PortStatus represents the error condition of a service port
message PortStatus {
// Port is the port number of the service port of which status is recorded here
optional int32 port = 1;
@@ -4695,19 +4810,19 @@ message Probe {
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields must be specified.
message ProbeHandler {
- // Exec specifies the action to take.
+ // Exec specifies a command to execute in the container.
// +optional
optional ExecAction exec = 1;
- // HTTPGet specifies the http request to perform.
+ // HTTPGet specifies an HTTP GET request to perform.
// +optional
optional HTTPGetAction httpGet = 2;
- // TCPSocket specifies an action involving a TCP port.
+ // TCPSocket specifies a connection to a TCP port.
// +optional
optional TCPSocketAction tcpSocket = 3;
- // GRPC specifies an action involving a GRPC port.
+ // GRPC specifies a GRPC HealthCheckRequest.
// +optional
optional GRPCAction grpc = 4;
}
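
For reference, a probe sets exactly one of the four handlers; a minimal sketch with the vendored types, using an HTTP GET check (path and port are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			// Exactly one of Exec, HTTPGet, TCPSocket or GRPC may be set.
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt32(8080),
			},
		},
		PeriodSeconds: 10,
	}
	fmt.Printf("%+v\n", probe)
}
```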
@@ -5036,7 +5151,7 @@ message ResourceFieldSelector {
}
// ResourceHealth represents the health of a resource. It has the latest device health information.
-// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
+// This is a part of KEP https://kep.k8s.io/4680.
message ResourceHealth {
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
optional string resourceID = 1;
@@ -5145,15 +5260,18 @@ message ResourceRequirements {
repeated ResourceClaim claims = 3;
}
+// ResourceStatus represents the status of a single resource allocated to a Pod.
message ResourceStatus {
- // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
+ // Name of the resource. Must be unique within the pod and, in the case of a non-DRA resource, must match one of the resources from the pod spec.
+ // For DRA resources, the value must be "claim:<claim_name>/<request>".
+ // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
// +required
optional string name = 1;
- // List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
- // At a minimum, ResourceID must uniquely identify the Resource
- // allocated to the Pod on the Node for the lifetime of a Pod.
- // See ResourceID type for it's definition.
+ // List of unique resource health entries. Each element in the list contains a unique resource ID and its health.
+ // At a minimum, for the lifetime of a Pod, the resource ID must uniquely identify the resource allocated to the Pod on the Node.
+ // If another Pod on the same Node reports a status with the same resource ID, it must be the same resource that they share.
+ // See ResourceID type definition for a specific format it has in various use cases.
// +listType=map
// +listMapKey=resourceID
repeated ResourceHealth resources = 2;
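
A hypothetical helper showing the DRA resource-name format described above (`claim:<claim_name>/<request>`); the function and example values are illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// draResourceName builds the "claim:<claim_name>/<request>" form used for DRA
// resources in ResourceStatus.Name.
func draResourceName(claimName, request string) corev1.ResourceName {
	return corev1.ResourceName(fmt.Sprintf("claim:%s/%s", claimName, request))
}

func main() {
	fmt.Println(draResourceName("gpu-claim", "gpu")) // claim:gpu-claim/gpu
}
```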
@@ -5611,6 +5729,8 @@ message ServiceAccount {
// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
+ // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
+ // Prefer separate namespaces to isolate access to mounted secrets.
// This field should not be used to find auto-generated service account token secrets for use outside of pods.
// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
@@ -5996,7 +6116,7 @@ message ServiceSpec {
// not set, the implementation will apply its default routing strategy. If set
// to "PreferClose", implementations should prioritize endpoints that are
// topologically close (e.g., same zone).
- // This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ // This is a beta field and requires enabling ServiceTrafficDistribution feature.
// +featureGate=ServiceTrafficDistribution
// +optional
optional string trafficDistribution = 23;
@@ -6323,6 +6443,20 @@ message TopologySpreadConstraint {
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
+// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
+// and the version of the actual struct is irrelevant.
+// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +structType=atomic
message TypedLocalObjectReference {
// APIGroup is the group for the resource being referenced.
@@ -6338,6 +6472,7 @@ message TypedLocalObjectReference {
optional string name = 3;
}
+// TypedObjectReference contains enough information to let you locate the typed referenced object
message TypedObjectReference {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@@ -6538,18 +6673,22 @@ message VolumeSource {
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
// gitRepo represents a git repository at a particular revision.
- // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
// +optional
@@ -6572,6 +6711,7 @@ message VolumeSource {
optional ISCSIVolumeSource iscsi = 8;
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsVolumeSource glusterfs = 9;
@@ -6583,25 +6723,31 @@ message VolumeSource {
optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDVolumeSource rbd = 11;
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
optional FlexVolumeSource flexVolume = 12;
// cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
optional CinderVolumeSource cinder = 13;
- // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
optional CephFSVolumeSource cephfs = 14;
- // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+ // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
optional FlockerVolumeSource flocker = 15;
@@ -6614,6 +6760,8 @@ message VolumeSource {
optional FCVolumeSource fc = 17;
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
// +optional
optional AzureFileVolumeSource azureFile = 18;
@@ -6621,37 +6769,48 @@ message VolumeSource {
// +optional
optional ConfigMapVolumeSource configMap = 19;
- // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
- // quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
optional QuobyteVolumeSource quobyte = 21;
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
// +optional
optional AzureDiskVolumeSource azureDisk = 22;
- // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;
// projected items for all in one resources secrets, configmaps, and downward API
optional ProjectedVolumeSource projected = 26;
- // portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
// +optional
optional PortworxVolumeSource portworxVolume = 24;
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
optional ScaleIOVolumeSource scaleIO = 25;
// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// +optional
optional StorageOSVolumeSource storageos = 27;
- // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
+ // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
// +optional
optional CSIVolumeSource csi = 28;
diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go
index ee5335ee8..609cadc7a 100644
--- a/vendor/k8s.io/api/core/v1/objectreference.go
+++ b/vendor/k8s.io/api/core/v1/objectreference.go
@@ -20,7 +20,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
-// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
+// SetGroupVersionKind allows clients to preemptively get a reference to an API object and pass it to places that
// intend only to get a reference to that object. This simplifies the event recording interface.
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index 3a74138ba..fb2c1c745 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -63,16 +63,20 @@ type VolumeSource struct {
EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// gitRepo represents a git repository at a particular revision.
- // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
// +optional
@@ -91,6 +95,7 @@ type VolumeSource struct {
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
@@ -100,21 +105,27 @@ type VolumeSource struct {
// +optional
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
- // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
- // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+ // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
// downwardAPI represents downward API about the pod that should populate this volume
@@ -124,34 +135,47 @@ type VolumeSource struct {
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
// +optional
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// configMap represents a configMap that should populate this volume
// +optional
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
- // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
- // quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
- // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// projected items for all in one resources secrets, configmaps, and downward API
Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
- // portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
- // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
+ // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
// +optional
CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
// ephemeral represents a volume that is handled by a cluster storage driver.
@@ -219,11 +243,15 @@ type PersistentVolumeClaimVolumeSource struct {
type PersistentVolumeSource struct {
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
@@ -236,6 +264,7 @@ type PersistentVolumeSource struct {
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
// glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
@@ -244,6 +273,7 @@ type PersistentVolumeSource struct {
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
@@ -252,50 +282,68 @@ type PersistentVolumeSource struct {
// +optional
ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
// cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
- // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
- // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
+ // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
// +optional
AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
- // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
- // quobyte represents a Quobyte mount on the host that shares a pod's lifetime
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
- // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
- // portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
- // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// More info: https://examples.k8s.io/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
- // csi represents storage that is handled by an external CSI driver (Beta feature).
+ // csi represents storage that is handled by an external CSI driver.
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
@@ -582,6 +630,7 @@ type PersistentVolumeClaimSpec struct {
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
}
+// TypedObjectReference contains enough information to let you locate the typed referenced object
type TypedObjectReference struct {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@@ -688,8 +737,13 @@ type ModifyVolumeStatus struct {
// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
- Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
- Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Type is the type of the condition.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
+ Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// lastProbeTime is the time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
@@ -2015,7 +2069,7 @@ type KeyToPath struct {
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
-// Local represents directly-attached storage with node affinity (Beta feature)
+// Local represents directly-attached storage with node affinity
type LocalVolumeSource struct {
// path of the full path to the volume on the node.
// It can be either a directory or block device (disk, partition, ...).
@@ -2029,7 +2083,7 @@ type LocalVolumeSource struct {
FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
-// Represents storage that is managed by an external CSI volume driver (Beta feature)
+// Represents storage that is managed by an external CSI volume driver
type CSIPersistentVolumeSource struct {
// driver is the name of the driver to use for this volume.
// Required.
@@ -2476,6 +2530,7 @@ type TCPSocketAction struct {
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
+// GRPCAction specifies an action involving a GRPC service.
type GRPCAction struct {
// Port number of the gRPC service. Number must be in the range 1 to 65535.
Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"`
@@ -2891,17 +2946,16 @@ type Container struct {
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields must be specified.
type ProbeHandler struct {
- // Exec specifies the action to take.
+ // Exec specifies a command to execute in the container.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
- // HTTPGet specifies the http request to perform.
+ // HTTPGet specifies an HTTP GET request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
- // TCPSocket specifies an action involving a TCP port.
+ // TCPSocket specifies a connection to a TCP port.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
-
- // GRPC specifies an action involving a GRPC port.
+ // GRPC specifies a GRPC HealthCheckRequest.
// +optional
GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"`
}
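An illustrative sketch, not part of the vendored sources: one way a readiness probe could populate the ProbeHandler above with the GRPC action. It assumes v1 "k8s.io/api/core/v1" is imported; the port and health-service name are placeholders.

func exampleGRPCReadinessProbe() v1.Probe {
	// Service is placed into the gRPC HealthCheckRequest; leaving it nil falls back to gRPC's default behavior.
	service := "grpc.health.v1.Health"
	return v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			GRPC: &v1.GRPCAction{Port: 9090, Service: &service},
		},
	}
}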
@@ -2909,18 +2963,18 @@ type ProbeHandler struct {
// LifecycleHandler defines a specific action that should be taken in a lifecycle
// hook. One and only one of the fields, except TCPSocket must be specified.
type LifecycleHandler struct {
- // Exec specifies the action to take.
+ // Exec specifies a command to execute in the container.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
- // HTTPGet specifies the http request to perform.
+ // HTTPGet specifies an HTTP GET request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
- // for the backward compatibility. There are no validation of this field and
- // lifecycle hooks will fail in runtime when tcp handler is specified.
+ // for backward compatibility. There is no validation of this field and
+ // lifecycle hooks will fail at runtime when it is specified.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
- // Sleep represents the duration that the container should sleep before being terminated.
+ // Sleep represents a duration that the container should sleep.
// +featureGate=PodLifecycleSleepAction
// +optional
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
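An illustrative sketch, not part of the vendored sources: a preStop hook using the Sleep handler documented above (gated by PodLifecycleSleepAction). It assumes v1 "k8s.io/api/core/v1" is imported; the duration is a placeholder.

func exampleSleepPreStop() *v1.Lifecycle {
	return &v1.Lifecycle{
		PreStop: &v1.LifecycleHandler{
			// Sleep for a few seconds before the container is terminated.
			Sleep: &v1.SleepAction{Seconds: 5},
		},
	}
}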
@@ -3071,7 +3125,7 @@ type ContainerStatus struct {
// AllocatedResources represents the compute resources allocated for this container by the
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
// and after successfully admitting desired pod resize.
- // +featureGate=InPlacePodVerticalScaling
+ // +featureGate=InPlacePodVerticalScalingAllocatedStatus
// +optional
AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
// Resources represents the compute resource requests and limits that have been successfully
@@ -3102,14 +3156,17 @@ type ContainerStatus struct {
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
}
+// ResourceStatus represents the status of a single resource allocated to a Pod.
type ResourceStatus struct {
- // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
+ // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec.
+ // For DRA resources, the value must be "claim:<claim_name>/<request>".
+ // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
// +required
Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
- // List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
- // At a minimum, ResourceID must uniquely identify the Resource
- // allocated to the Pod on the Node for the lifetime of a Pod.
- // See ResourceID type for it's definition.
+ // List of unique resources health. Each element in the list contains a unique resource ID and its health.
+ // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node.
+ // If another Pod on the same Node reports the status with the same resource ID, it must be the same resource they share.
+ // See ResourceID type definition for a specific format it has in various use cases.
// +listType=map
// +listMapKey=resourceID
Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
@@ -3126,16 +3183,16 @@ const (
// ResourceID is calculated based on the source of this resource health information.
// For DevicePlugin:
//
-// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
+// DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
//
// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
// For DRA:
//
-// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
+// //: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
type ResourceID string
// ResourceHealth represents the health of a resource. It has the latest device health information.
-// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
+// This is a part of KEP https://kep.k8s.io/4680.
type ResourceHealth struct {
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
@@ -3237,7 +3294,7 @@ const (
// during scheduling, for example due to nodeAffinity parsing errors.
PodReasonSchedulerError = "SchedulerError"
- // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
+ // PodReasonTerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
// is initiated by kubelet
PodReasonTerminationByKubelet = "TerminationByKubelet"
@@ -4030,6 +4087,20 @@ type PodSpec struct {
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
+ // Resources is the total amount of CPU and Memory resources required by all
+ // containers in the pod. It supports specifying Requests and Limits for
+ // "cpu" and "memory" resource names only. ResourceClaims are not supported.
+ //
+ // This field enables fine-grained control over resource allocation for the
+ // entire pod, allowing resource sharing among containers in a pod.
+ // TODO: For beta graduation, expand this comment with a detailed explanation.
+ //
+ // This is an alpha field and requires enabling the PodLevelResources feature
+ // gate.
+ //
+ // +featureGate=PodLevelResources
+ // +optional
+ Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
}
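An illustrative sketch, not part of the vendored sources: a PodSpec exercising the new pod-level Resources field (alpha, gated by PodLevelResources). It assumes v1 "k8s.io/api/core/v1" and "k8s.io/apimachinery/pkg/api/resource" are imported; the quantities are placeholders.

func examplePodLevelResources() v1.PodSpec {
	return v1.PodSpec{
		// Only "cpu" and "memory" are supported at the pod level; ResourceClaims are not.
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}
}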
// PodResourceClaim references exactly one ResourceClaim, either directly
@@ -4308,6 +4379,22 @@ const (
SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
)
+// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+type PodSELinuxChangePolicy string
+
+const (
+ // Recursive relabeling of all Pod volumes by the container runtime.
+ // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+ SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive"
+ // MountOption mounts all eligible Pod volumes with `-o context` mount option.
+ // This requires all Pods that share the same volume to use the same SELinux label.
+ // It is not possible to share the same volume among privileged and unprivileged Pods.
+ // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ // CSIDriver instance. Other volumes are always re-labelled recursively.
+ SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption"
+)
+
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
@@ -4406,6 +4493,32 @@ type PodSecurityContext struct {
// Note that this field cannot be set when spec.os.name is windows.
// +optional
AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"`
+ // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ // It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ // Valid values are "MountOption" and "Recursive".
+ //
+ // "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+ //
+ // "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ // This requires all Pods that share the same volume to use the same SELinux label.
+ // It is not possible to share the same volume among privileged and unprivileged Pods.
+ // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ // CSIDriver instance. Other volumes are always re-labelled recursively.
+ // "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+ //
+ // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ // and "Recursive" for all other volumes.
+ //
+ // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+ //
+ // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ // Note that this field cannot be set when spec.os.name is windows.
+ // +featureGate=SELinuxChangePolicy
+ // +optional
+ SELinuxChangePolicy *PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty" protobuf:"bytes,13,opt,name=seLinuxChangePolicy"`
}
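An illustrative sketch, not part of the vendored sources: opting a Pod into recursive relabeling through the new seLinuxChangePolicy field (gated by SELinuxChangePolicy). It assumes v1 "k8s.io/api/core/v1" is imported.

func exampleSELinuxChangePolicy() *v1.PodSecurityContext {
	policy := v1.SELinuxChangePolicyRecursive
	return &v1.PodSecurityContext{
		// "Recursive" relabels every file on the Pod's volumes; "MountOption" would use the -o context mount option instead.
		SELinuxChangePolicy: &policy,
	}
}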
// SeccompProfile defines a pod/container's seccomp profile settings.
@@ -4513,8 +4626,10 @@ type PodDNSConfig struct {
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
+ // Name is this DNS resolver option's name.
// Required.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+ // Value is this DNS resolver option's value.
// +optional
Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
@@ -4807,24 +4922,45 @@ type PodStatus struct {
// +optional
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
- // The list has one entry per init container in the manifest. The most recent successful
+ // Statuses of init containers in this pod. The most recent successful non-restartable
// init container will have ready = true, the most recently started container will have
// startTime set.
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+ // Each init container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
// +listType=atomic
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
- // The list has one entry per container in the manifest.
+ // Statuses of containers in this pod.
+ // Each container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
+
// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
// See PodQOSClass type for available QOS classes
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes
// +optional
QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
- // Status for any ephemeral containers that have run in this pod.
+
+ // Statuses for any ephemeral containers that have run in this pod.
+ // Each ephemeral container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"`
@@ -4867,6 +5003,7 @@ type PodStatusResult struct {
// +genclient
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
+// +genclient:method=UpdateResize,verb=update,subresource=resize
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.0
@@ -5558,7 +5695,7 @@ type ServiceSpec struct {
// not set, the implementation will apply its default routing strategy. If set
// to "PreferClose", implementations should prioritize endpoints that are
// topologically close (e.g., same zone).
- // This is an alpha field and requires enabling ServiceTrafficDistribution feature.
+ // This is a beta field and requires enabling ServiceTrafficDistribution feature.
// +featureGate=ServiceTrafficDistribution
// +optional
TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
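An illustrative sketch, not part of the vendored sources: requesting topology-aware routing on a Service through the trafficDistribution field (beta, gated by ServiceTrafficDistribution). It assumes v1 "k8s.io/api/core/v1" is imported.

func exampleTrafficDistribution() v1.ServiceSpec {
	preferClose := "PreferClose"
	return v1.ServiceSpec{
		// Implementations should prefer endpoints that are topologically close, e.g. in the same zone.
		TrafficDistribution: &preferClose,
	}
}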
@@ -5692,6 +5829,8 @@ type ServiceAccount struct {
// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
+ // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
+ // Prefer separate namespaces to isolate access to mounted secrets.
// This field should not be used to find auto-generated service account token secrets for use outside of pods.
// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
@@ -6092,7 +6231,7 @@ type NodeStatus struct {
// +optional
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
// Conditions is an array of current observed node conditions.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#condition
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
@@ -6101,7 +6240,7 @@ type NodeStatus struct {
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
// Note: This field is declared as mergeable, but the merge key is not sufficiently
// unique, which can cause data corruption when it is merged. Callers should instead
// use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
@@ -6119,7 +6258,7 @@ type NodeStatus struct {
// +optional
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
// Set of ids/uuids to uniquely identify the node.
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#info
// +optional
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node
@@ -6454,10 +6593,13 @@ type NamespaceCondition struct {
Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
@@ -6508,7 +6650,6 @@ type NamespaceList struct {
// +k8s:prerelease-lifecycle-gen:introduced=1.0
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
-// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@@ -6528,6 +6669,15 @@ type Preconditions struct {
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
+const (
+ // LogStreamStdout is the stream type for stdout.
+ LogStreamStdout = "Stdout"
+ // LogStreamStderr is the stream type for stderr.
+ LogStreamStderr = "Stderr"
+ // LogStreamAll represents the combined stdout and stderr.
+ LogStreamAll = "All"
+)
+
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.0
@@ -6562,7 +6712,8 @@ type PodLogOptions struct {
// +optional
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// If set, the number of lines from the end of the logs to show. If not specified,
- // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime.
+ // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
// +optional
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// If set, the number of bytes to read from the server before terminating the
@@ -6579,6 +6730,14 @@ type PodLogOptions struct {
// the actual log data coming from the real kubelet).
// +optional
InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
+
+ // Specify which container log stream to return to the client.
+ // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
+ // are returned interleaved.
+ // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+ // +featureGate=PodLogsQuerySplitStreams
+ // +optional
+ Stream *string `json:"stream,omitempty" protobuf:"varint,10,opt,name=stream"`
}
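An illustrative sketch, not part of the vendored sources: requesting only a container's stderr stream via the new Stream field (requires the PodLogsQuerySplitStreams feature gate). It assumes v1 "k8s.io/api/core/v1", "k8s.io/client-go/kubernetes" and "k8s.io/client-go/rest" are imported; the namespace and pod name are placeholders.

func exampleStderrLogs(clientset kubernetes.Interface) *rest.Request {
	stream := v1.LogStreamStderr
	// Leaving Stream nil (or setting "All") keeps the interleaved stdout+stderr behavior.
	return clientset.CoreV1().Pods("default").GetLogs("example-pod", &v1.PodLogOptions{
		Stream: &stream,
	})
}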
// +k8s:conversion-gen:explicit-from=net/url.Values
@@ -6779,13 +6938,23 @@ type ObjectReference struct {
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +structType=atomic
type LocalObjectReference struct {
// Name of the referent.
// This field is effectively required, but due to backwards compatibility is
// allowed to be empty. Instances of this type with an empty value here are
// almost certainly wrong.
- // TODO: Add other useful fields. apiVersion, kind, uid?
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
// +default=""
@@ -6796,6 +6965,20 @@ type LocalObjectReference struct {
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
+// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
+// and the version of the actual struct is irrelevant.
+// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +structType=atomic
type TypedLocalObjectReference struct {
// APIGroup is the group for the resource being referenced.
@@ -7729,7 +7912,6 @@ const (
)
// PortStatus represents the error condition of a service port
-
type PortStatus struct {
// Port is the port number of the service port of which status is recorded here
Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 950806ef8..89ce3d230 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -117,7 +117,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
}
var map_Binding = map[string]string{
- "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.",
+ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"target": "The target object that you want to bind to the standard object.",
}
@@ -127,7 +127,7 @@ func (Binding) SwaggerDoc() map[string]string {
}
var map_CSIPersistentVolumeSource = map[string]string{
- "": "Represents storage that is managed by an external CSI volume driver (Beta feature)",
+ "": "Represents storage that is managed by an external CSI volume driver",
"driver": "driver is the name of the driver to use for this volume. Required.",
"volumeHandle": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
"readOnly": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
@@ -802,6 +802,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
}
var map_GRPCAction = map[string]string{
+ "": "GRPCAction specifies an action involving a GRPC service.",
"port": "Port number of the gRPC service. Number must be in the range 1 to 65535.",
"service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.",
}
@@ -967,10 +968,10 @@ func (Lifecycle) SwaggerDoc() map[string]string {
var map_LifecycleHandler = map[string]string{
"": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.",
- "exec": "Exec specifies the action to take.",
- "httpGet": "HTTPGet specifies the http request to perform.",
- "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.",
- "sleep": "Sleep represents the duration that the container should sleep before being terminated.",
+ "exec": "Exec specifies a command to execute in the container.",
+ "httpGet": "HTTPGet specifies an HTTP GET request to perform.",
+ "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.",
+ "sleep": "Sleep represents a duration that the container should sleep.",
}
func (LifecycleHandler) SwaggerDoc() map[string]string {
@@ -1062,7 +1063,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string {
}
var map_LocalVolumeSource = map[string]string{
- "": "Local represents directly-attached storage with node affinity (Beta feature)",
+ "": "Local represents directly-attached storage with node affinity",
"path": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
"fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.",
}
@@ -1104,9 +1105,12 @@ func (Namespace) SwaggerDoc() map[string]string {
}
var map_NamespaceCondition = map[string]string{
- "": "NamespaceCondition contains details about state of namespace.",
- "type": "Type of namespace controller condition.",
- "status": "Status of the condition, one of True, False, Unknown.",
+ "": "NamespaceCondition contains details about state of namespace.",
+ "type": "Type of namespace controller condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+ "reason": "Unique, one-word, CamelCase reason for the condition's last transition.",
+ "message": "Human-readable message indicating details about last transition.",
}
func (NamespaceCondition) SwaggerDoc() map[string]string {
@@ -1315,10 +1319,10 @@ var map_NodeStatus = map[string]string{
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
- "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
- "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
+ "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition",
+ "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
"daemonEndpoints": "Endpoints of daemons running on the Node.",
- "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info",
+ "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info",
"images": "List of container images on this node",
"volumesInUse": "List of attachable volumes in use (mounted) by the node.",
"volumesAttached": "List of volumes that are attached to the node.",
@@ -1398,6 +1402,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
var map_PersistentVolumeClaimCondition = map[string]string{
"": "PersistentVolumeClaimCondition contains details about state of pvc",
+ "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about",
+ "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required",
"lastProbeTime": "lastProbeTime is the time we probed the condition.",
"lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.",
"reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.",
@@ -1483,28 +1489,28 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string {
var map_PersistentVolumeSource = map[string]string{
"": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
- "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
"hostPath": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
- "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
"nfs": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
- "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
- "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
- "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
"fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
- "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
- "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
- "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
- "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
- "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
- "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
- "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
- "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
+ "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
+ "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
+ "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
+ "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
+ "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
+ "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
+ "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
+ "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
"local": "local represents directly-attached storage with node affinity",
- "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md",
- "csi": "csi represents storage that is handled by an external CSI driver (Beta feature).",
+ "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md",
+ "csi": "csi represents storage that is handled by an external CSI driver.",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@@ -1634,8 +1640,9 @@ func (PodDNSConfig) SwaggerDoc() map[string]string {
}
var map_PodDNSConfigOption = map[string]string{
- "": "PodDNSConfigOption defines DNS resolver options of a pod.",
- "name": "Required.",
+ "": "PodDNSConfigOption defines DNS resolver options of a pod.",
+ "name": "Name is this DNS resolver option's name. Required.",
+ "value": "Value is this DNS resolver option's value.",
}
func (PodDNSConfigOption) SwaggerDoc() map[string]string {
@@ -1683,9 +1690,10 @@ var map_PodLogOptions = map[string]string{
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
- "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+ "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
+ "stream": "Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
}
func (PodLogOptions) SwaggerDoc() map[string]string {
@@ -1772,6 +1780,7 @@ var map_PodSecurityContext = map[string]string{
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
"seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
"appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
+ "seLinuxChangePolicy": "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows.",
}
func (PodSecurityContext) SwaggerDoc() map[string]string {
@@ -1828,6 +1837,7 @@ var map_PodSpec = map[string]string{
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
+ "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@@ -1846,10 +1856,10 @@ var map_PodStatus = map[string]string{
"podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
- "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
- "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+ "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
+ "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
- "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.",
+ "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
"resourceClaimStatuses": "Status of resource claims.",
}
@@ -1899,6 +1909,7 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string {
}
var map_PortStatus = map[string]string{
+ "": "PortStatus represents the error condition of a service port",
"port": "Port is the port number of the service port of which status is recorded here",
"protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"",
"error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.",
@@ -1966,10 +1977,10 @@ func (Probe) SwaggerDoc() map[string]string {
var map_ProbeHandler = map[string]string{
"": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.",
- "exec": "Exec specifies the action to take.",
- "httpGet": "HTTPGet specifies the http request to perform.",
- "tcpSocket": "TCPSocket specifies an action involving a TCP port.",
- "grpc": "GRPC specifies an action involving a GRPC port.",
+ "exec": "Exec specifies a command to execute in the container.",
+ "httpGet": "HTTPGet specifies an HTTP GET request to perform.",
+ "tcpSocket": "TCPSocket specifies a connection to a TCP port.",
+ "grpc": "GRPC specifies a GRPC HealthCheckRequest.",
}
func (ProbeHandler) SwaggerDoc() map[string]string {
@@ -2125,7 +2136,7 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
}
var map_ResourceHealth = map[string]string{
- "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.",
+ "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.",
"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
"health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
}
@@ -2188,8 +2199,9 @@ func (ResourceRequirements) SwaggerDoc() map[string]string {
}
var map_ResourceStatus = map[string]string{
- "name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.",
- "resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.",
+ "": "ResourceStatus represents the status of a single resource allocated to a Pod.",
+ "name": "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:/\". When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.",
+ "resources": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.",
}
func (ResourceStatus) SwaggerDoc() map[string]string {
@@ -2391,7 +2403,7 @@ func (Service) SwaggerDoc() map[string]string {
var map_ServiceAccount = map[string]string{
"": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret",
+ "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret",
"imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
}
@@ -2475,7 +2487,7 @@ var map_ServiceSpec = map[string]string{
"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
"loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
"internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
- "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.",
+ "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
}
func (ServiceSpec) SwaggerDoc() map[string]string {
@@ -2628,6 +2640,7 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
}
var map_TypedObjectReference = map[string]string{
+ "": "TypedObjectReference contains enough information to let you locate the typed referenced object",
"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
"kind": "Kind is the type of resource being referenced",
"name": "Name is the name of resource being referenced",
@@ -2720,32 +2733,32 @@ var map_VolumeSource = map[string]string{
"": "Represents the source of a volume to mount. Only one of its members may be specified.",
"hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
"emptyDir": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
- "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
- "gitRepo": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+ "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
- "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
- "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
- "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
- "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
+ "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
+ "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
"downwardAPI": "downwardAPI represents downward API about the pod that should populate this volume",
"fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
- "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
"configMap": "configMap represents a configMap that should populate this volume",
- "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
- "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
- "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
- "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
+ "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
+ "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
+ "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
"projected": "projected items for all in one resources secrets, configmaps, and downward API",
- "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
- "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
- "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
- "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
+ "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
+ "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
+ "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
+ "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
}
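The regenerated field documentation above (including the new deprecation notes steering in-tree volume plugins toward their CSI drivers) is not just a comment change: it is exposed at runtime through each type's generated SwaggerDoc() method. A minimal sketch, assuming only the vendored k8s.io/api/core/v1 package already present in this module, of how a consumer could read the updated text for one field; this is illustrative and not part of the diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// SwaggerDoc() has a value receiver, so an empty struct literal is enough.
	docs := v1.VolumeSource{}.SwaggerDoc()
	// Prints the updated guidance for the deprecated in-tree GCE PD source,
	// which now points at the pd.csi.storage.gke.io CSI driver.
	fmt.Println(docs["gcePersistentDisk"])
}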
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index 3d23f7f62..3f669092e 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -3935,6 +3935,11 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
*out = new(int64)
**out = **in
}
+ if in.Stream != nil {
+ in, out := &in.Stream, &out.Stream
+ *out = new(string)
+ **out = **in
+ }
return
}
@@ -4169,6 +4174,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = new(AppArmorProfile)
(*in).DeepCopyInto(*out)
}
+ if in.SELinuxChangePolicy != nil {
+ in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy
+ *out = new(PodSELinuxChangePolicy)
+ **out = **in
+ }
return
}
@@ -4361,6 +4371,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/vendor/k8s.io/api/resource/v1alpha3/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go
index aeb66561f..ffc21307d 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/doc.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go
@@ -17,7 +17,7 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
-
+// +k8s:prerelease-lifecycle-gen=true
// +groupName=resource.k8s.io
// Package v1alpha3 is the v1alpha3 version of the resource API.
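The `+k8s:prerelease-lifecycle-gen=true` marker added above asks the Kubernetes code generators to emit API lifecycle metadata methods (APILifecycleIntroduced, APILifecycleDeprecated, and related helpers) for the v1alpha3 resource types. A hedged sketch of how such metadata is typically consumed; whether a given type actually carries these methods depends on the regenerated vendor files, so treat the call below as an assumption rather than a guaranteed API:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Assumption: the prerelease-lifecycle generator has emitted
	// APILifecycleIntroduced for ResourceClaim in this vendor tree.
	var rc resourcev1alpha3.ResourceClaim
	major, minor := rc.APILifecycleIntroduced()
	fmt.Printf("resource.k8s.io/v1alpha3 ResourceClaim introduced in %d.%d\n", major, minor)
}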
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
index 4ac01cc6f..540f7b818 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
@@ -26,8 +26,9 @@ import (
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
- v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
@@ -48,10 +49,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_66649ee9bbcd89d2, []int{0}
+}
+func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
+}
+func (m *AllocatedDeviceStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
+
func (m *AllocationResult) Reset() { *m = AllocationResult{} }
func (*AllocationResult) ProtoMessage() {}
func (*AllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{0}
+ return fileDescriptor_66649ee9bbcd89d2, []int{1}
}
func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -79,7 +108,7 @@ var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
func (m *BasicDevice) Reset() { *m = BasicDevice{} }
func (*BasicDevice) ProtoMessage() {}
func (*BasicDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{1}
+ return fileDescriptor_66649ee9bbcd89d2, []int{2}
}
func (m *BasicDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -107,7 +136,7 @@ var xxx_messageInfo_BasicDevice proto.InternalMessageInfo
func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
func (*CELDeviceSelector) ProtoMessage() {}
func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{2}
+ return fileDescriptor_66649ee9bbcd89d2, []int{3}
}
func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -135,7 +164,7 @@ var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
func (m *Device) Reset() { *m = Device{} }
func (*Device) ProtoMessage() {}
func (*Device) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{3}
+ return fileDescriptor_66649ee9bbcd89d2, []int{4}
}
func (m *Device) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -163,7 +192,7 @@ var xxx_messageInfo_Device proto.InternalMessageInfo
func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
func (*DeviceAllocationConfiguration) ProtoMessage() {}
func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{4}
+ return fileDescriptor_66649ee9bbcd89d2, []int{5}
}
func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -191,7 +220,7 @@ var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
func (*DeviceAllocationResult) ProtoMessage() {}
func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{5}
+ return fileDescriptor_66649ee9bbcd89d2, []int{6}
}
func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -219,7 +248,7 @@ var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
func (*DeviceAttribute) ProtoMessage() {}
func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{6}
+ return fileDescriptor_66649ee9bbcd89d2, []int{7}
}
func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -247,7 +276,7 @@ var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
func (*DeviceClaim) ProtoMessage() {}
func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{7}
+ return fileDescriptor_66649ee9bbcd89d2, []int{8}
}
func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -275,7 +304,7 @@ var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
func (*DeviceClaimConfiguration) ProtoMessage() {}
func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{8}
+ return fileDescriptor_66649ee9bbcd89d2, []int{9}
}
func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -303,7 +332,7 @@ var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
func (m *DeviceClass) Reset() { *m = DeviceClass{} }
func (*DeviceClass) ProtoMessage() {}
func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{9}
+ return fileDescriptor_66649ee9bbcd89d2, []int{10}
}
func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -331,7 +360,7 @@ var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
func (*DeviceClassConfiguration) ProtoMessage() {}
func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{10}
+ return fileDescriptor_66649ee9bbcd89d2, []int{11}
}
func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -359,7 +388,7 @@ var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
func (*DeviceClassList) ProtoMessage() {}
func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{11}
+ return fileDescriptor_66649ee9bbcd89d2, []int{12}
}
func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -387,7 +416,7 @@ var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
func (*DeviceClassSpec) ProtoMessage() {}
func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{12}
+ return fileDescriptor_66649ee9bbcd89d2, []int{13}
}
func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -415,7 +444,7 @@ var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
func (*DeviceConfiguration) ProtoMessage() {}
func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{13}
+ return fileDescriptor_66649ee9bbcd89d2, []int{14}
}
func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -443,7 +472,7 @@ var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
func (*DeviceConstraint) ProtoMessage() {}
func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{14}
+ return fileDescriptor_66649ee9bbcd89d2, []int{15}
}
func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -471,7 +500,7 @@ var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
func (*DeviceRequest) ProtoMessage() {}
func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{15}
+ return fileDescriptor_66649ee9bbcd89d2, []int{16}
}
func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -499,7 +528,7 @@ var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
func (*DeviceRequestAllocationResult) ProtoMessage() {}
func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{16}
+ return fileDescriptor_66649ee9bbcd89d2, []int{17}
}
func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -527,7 +556,7 @@ var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
func (*DeviceSelector) ProtoMessage() {}
func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{17}
+ return fileDescriptor_66649ee9bbcd89d2, []int{18}
}
func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -552,43 +581,15 @@ func (m *DeviceSelector) XXX_DiscardUnknown() {
var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
-func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-func (*OpaqueDeviceConfiguration) ProtoMessage() {}
-func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{18}
-}
-func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
-
-func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} }
-func (*PodSchedulingContext) ProtoMessage() {}
-func (*PodSchedulingContext) Descriptor() ([]byte, []int) {
+func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
+func (*NetworkDeviceData) ProtoMessage() {}
+func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{19}
}
-func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error {
+func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -596,83 +597,27 @@ func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte
}
return b[:n], nil
}
-func (m *PodSchedulingContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSchedulingContext.Merge(m, src)
+func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkDeviceData.Merge(m, src)
}
-func (m *PodSchedulingContext) XXX_Size() int {
+func (m *NetworkDeviceData) XXX_Size() int {
return m.Size()
}
-func (m *PodSchedulingContext) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m)
+func (m *NetworkDeviceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
}
-var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo
+var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
-func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} }
-func (*PodSchedulingContextList) ProtoMessage() {}
-func (*PodSchedulingContextList) Descriptor() ([]byte, []int) {
+func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
return fileDescriptor_66649ee9bbcd89d2, []int{20}
}
-func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSchedulingContextList.Merge(m, src)
-}
-func (m *PodSchedulingContextList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSchedulingContextList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo
-
-func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} }
-func (*PodSchedulingContextSpec) ProtoMessage() {}
-func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{21}
-}
-func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src)
-}
-func (m *PodSchedulingContextSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo
-
-func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} }
-func (*PodSchedulingContextStatus) ProtoMessage() {}
-func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{22}
-}
-func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error {
+func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -680,22 +625,22 @@ func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) (
}
return b[:n], nil
}
-func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src)
+func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
}
-func (m *PodSchedulingContextStatus) XXX_Size() int {
+func (m *OpaqueDeviceConfiguration) XXX_Size() int {
return m.Size()
}
-func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m)
+func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
}
-var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo
+var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {}
func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{23}
+ return fileDescriptor_66649ee9bbcd89d2, []int{21}
}
func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -723,7 +668,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
func (*ResourceClaimConsumerReference) ProtoMessage() {}
func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{24}
+ return fileDescriptor_66649ee9bbcd89d2, []int{22}
}
func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -751,7 +696,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
func (*ResourceClaimList) ProtoMessage() {}
func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{25}
+ return fileDescriptor_66649ee9bbcd89d2, []int{23}
}
func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -776,38 +721,10 @@ func (m *ResourceClaimList) XXX_DiscardUnknown() {
var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
-func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} }
-func (*ResourceClaimSchedulingStatus) ProtoMessage() {}
-func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{26}
-}
-func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src)
-}
-func (m *ResourceClaimSchedulingStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo
-
func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
func (*ResourceClaimSpec) ProtoMessage() {}
func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{27}
+ return fileDescriptor_66649ee9bbcd89d2, []int{24}
}
func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -835,7 +752,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
func (*ResourceClaimStatus) ProtoMessage() {}
func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{28}
+ return fileDescriptor_66649ee9bbcd89d2, []int{25}
}
func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -863,7 +780,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
func (*ResourceClaimTemplate) ProtoMessage() {}
func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{29}
+ return fileDescriptor_66649ee9bbcd89d2, []int{26}
}
func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -891,7 +808,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
func (*ResourceClaimTemplateList) ProtoMessage() {}
func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{30}
+ return fileDescriptor_66649ee9bbcd89d2, []int{27}
}
func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -919,7 +836,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
func (*ResourceClaimTemplateSpec) ProtoMessage() {}
func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{31}
+ return fileDescriptor_66649ee9bbcd89d2, []int{28}
}
func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -947,7 +864,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
func (m *ResourcePool) Reset() { *m = ResourcePool{} }
func (*ResourcePool) ProtoMessage() {}
func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{32}
+ return fileDescriptor_66649ee9bbcd89d2, []int{29}
}
func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -975,7 +892,7 @@ var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
func (*ResourceSlice) ProtoMessage() {}
func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{33}
+ return fileDescriptor_66649ee9bbcd89d2, []int{30}
}
func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1003,7 +920,7 @@ var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
func (*ResourceSliceList) ProtoMessage() {}
func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{34}
+ return fileDescriptor_66649ee9bbcd89d2, []int{31}
}
func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1031,7 +948,7 @@ var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
func (*ResourceSliceSpec) ProtoMessage() {}
func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{35}
+ return fileDescriptor_66649ee9bbcd89d2, []int{32}
}
func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1057,6 +974,7 @@ func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1alpha3.AllocatedDeviceStatus")
proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult")
proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice")
proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry")
@@ -1077,15 +995,11 @@ func init() {
proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest")
proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult")
proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
+ proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1alpha3.NetworkDeviceData")
proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration")
- proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext")
- proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList")
- proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec")
- proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus")
proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim")
proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference")
proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList")
- proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus")
proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec")
proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus")
proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate")
@@ -1102,138 +1016,208 @@ func init() {
}
var fileDescriptor_66649ee9bbcd89d2 = []byte{
- // 2085 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57,
- 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda,
- 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26,
- 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12,
- 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24,
- 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b,
- 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3,
- 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4,
- 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a,
- 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5,
- 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30,
- 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78,
- 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96,
- 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71,
- 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4,
- 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81,
- 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a,
- 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f,
- 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d,
- 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e,
- 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a,
- 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2,
- 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a,
- 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf,
- 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9,
- 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4,
- 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68,
- 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a,
- 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26,
- 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52,
- 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b,
- 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed,
- 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93,
- 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6,
- 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d,
- 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1,
- 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9,
- 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91,
- 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06,
- 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3,
- 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59,
- 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54,
- 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d,
- 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66,
- 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6,
- 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa,
- 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71,
- 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85,
- 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16,
- 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74,
- 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66,
- 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82,
- 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50,
- 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21,
- 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c,
- 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe,
- 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6,
- 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38,
- 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed,
- 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30,
- 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4,
- 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17,
- 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47,
- 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93,
- 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11,
- 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd,
- 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf,
- 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98,
- 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0,
- 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d,
- 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67,
- 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf,
- 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e,
- 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2,
- 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a,
- 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12,
- 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8,
- 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88,
- 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8,
- 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30,
- 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0,
- 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba,
- 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0,
- 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe,
- 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e,
- 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17,
- 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41,
- 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65,
- 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a,
- 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79,
- 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1,
- 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9,
- 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96,
- 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49,
- 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f,
- 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e,
- 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5,
- 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96,
- 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0,
- 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10,
- 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31,
- 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83,
- 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3,
- 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36,
- 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0,
- 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf,
- 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3,
- 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc,
- 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb,
- 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7,
- 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1,
- 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b,
- 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93,
- 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e,
- 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45,
- 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed,
- 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff,
- 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40,
- 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00,
- 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f,
- 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8,
- 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac,
- 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a,
- 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56,
- 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5,
- 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92,
- 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d,
- 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37,
- 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71,
- 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6,
- 0x20, 0x64, 0x20, 0x00, 0x00,
+ // 2030 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0x1c, 0x57,
+ 0xdd, 0xb3, 0xe3, 0xcf, 0xdf, 0xfa, 0x2b, 0x2f, 0xa4, 0x38, 0xa6, 0xec, 0x3a, 0x53, 0x04, 0x4e,
+ 0x9b, 0xee, 0x36, 0x4e, 0xd5, 0x16, 0xc2, 0x01, 0x8f, 0xed, 0x06, 0x47, 0x89, 0xe3, 0x3c, 0xb7,
+ 0x11, 0x81, 0x12, 0x78, 0x9e, 0x7d, 0xb6, 0x07, 0xcf, 0xce, 0x4c, 0xe7, 0xbd, 0x71, 0xea, 0x0b,
+ 0xaa, 0xe0, 0x1e, 0xf1, 0x0f, 0x20, 0x0e, 0x48, 0x48, 0x5c, 0x80, 0xff, 0x00, 0x24, 0x90, 0x88,
+ 0xe0, 0x12, 0x09, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xce, 0xdc, 0x73, 0x42, 0xef, 0xcd, 0x9b, 0xcf,
+ 0xdd, 0x71, 0xc6, 0x55, 0xb1, 0xd2, 0xdb, 0xce, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xb8, 0x72,
+ 0xf8, 0x0e, 0x6b, 0xd9, 0x5e, 0x9b, 0xf8, 0x76, 0x3b, 0xa0, 0xcc, 0x0b, 0x03, 0x8b, 0xb6, 0x8f,
+ 0xae, 0x12, 0xc7, 0x3f, 0x20, 0xd7, 0xda, 0xfb, 0xd4, 0xa5, 0x01, 0xe1, 0xb4, 0xd3, 0xf2, 0x03,
+ 0x8f, 0x7b, 0xe8, 0xe5, 0x88, 0xba, 0x45, 0x7c, 0xbb, 0x15, 0x53, 0xb7, 0x62, 0xea, 0xc5, 0xd7,
+ 0xf7, 0x6d, 0x7e, 0x10, 0xee, 0xb6, 0x2c, 0xaf, 0xdb, 0xde, 0xf7, 0xf6, 0xbd, 0xb6, 0x64, 0xda,
+ 0x0d, 0xf7, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x22, 0x61, 0x8b, 0x46, 0x46, 0xb5, 0xe5, 0x05, 0x42,
+ 0x6d, 0x51, 0xe1, 0xe2, 0x9b, 0x29, 0x4d, 0x97, 0x58, 0x07, 0xb6, 0x4b, 0x83, 0xe3, 0xb6, 0x7f,
+ 0xb8, 0x9f, 0xb7, 0xf7, 0x34, 0x5c, 0xac, 0xdd, 0xa5, 0x9c, 0x0c, 0xd3, 0xd5, 0x2e, 0xe3, 0x0a,
+ 0x42, 0x97, 0xdb, 0xdd, 0x41, 0x35, 0x6f, 0x3d, 0x8f, 0x81, 0x59, 0x07, 0xb4, 0x4b, 0x8a, 0x7c,
+ 0xc6, 0xaf, 0x75, 0xb8, 0xb0, 0xea, 0x38, 0x9e, 0x25, 0x60, 0xeb, 0xf4, 0xc8, 0xb6, 0xe8, 0x0e,
+ 0x27, 0x3c, 0x64, 0xe8, 0xeb, 0x30, 0xde, 0x09, 0xec, 0x23, 0x1a, 0x2c, 0x68, 0x4b, 0xda, 0xf2,
+ 0x94, 0x39, 0xfb, 0xb8, 0xd7, 0x1c, 0xe9, 0xf7, 0x9a, 0xe3, 0xeb, 0x12, 0x8a, 0x15, 0x16, 0x2d,
+ 0xc1, 0xa8, 0xef, 0x79, 0xce, 0x42, 0x4d, 0x52, 0x4d, 0x2b, 0xaa, 0xd1, 0x6d, 0xcf, 0x73, 0xb0,
+ 0xc4, 0x48, 0x49, 0x52, 0xf2, 0x82, 0x5e, 0x90, 0x24, 0xa1, 0x58, 0x61, 0x91, 0x05, 0x60, 0x79,
+ 0x6e, 0xc7, 0xe6, 0xb6, 0xe7, 0xb2, 0x85, 0xd1, 0x25, 0x7d, 0xb9, 0xbe, 0xd2, 0x6e, 0xa5, 0x61,
+ 0x4e, 0x0e, 0xd6, 0xf2, 0x0f, 0xf7, 0x05, 0x80, 0xb5, 0x84, 0xff, 0x5a, 0x47, 0x57, 0x5b, 0x6b,
+ 0x31, 0x9f, 0x89, 0x94, 0x70, 0x48, 0x40, 0x0c, 0x67, 0xc4, 0xa2, 0x3b, 0x30, 0xda, 0x21, 0x9c,
+ 0x2c, 0x8c, 0x2d, 0x69, 0xcb, 0xf5, 0x95, 0xd7, 0x4b, 0xc5, 0x2b, 0xbf, 0xb5, 0x30, 0x79, 0xb8,
+ 0xf1, 0x11, 0xa7, 0x2e, 0x13, 0xc2, 0x93, 0xd3, 0xad, 0x13, 0x4e, 0xb0, 0x14, 0x84, 0x76, 0xa1,
+ 0xee, 0x52, 0xfe, 0xd0, 0x0b, 0x0e, 0x05, 0x70, 0x61, 0x5c, 0xca, 0xcd, 0x9a, 0x3d, 0x98, 0x9d,
+ 0xad, 0x2d, 0xc5, 0x20, 0xcf, 0x2d, 0xd8, 0xcc, 0xb9, 0x7e, 0xaf, 0x59, 0xdf, 0x4a, 0xe5, 0xe0,
+ 0xac, 0x50, 0xe3, 0xef, 0x1a, 0xcc, 0xab, 0x28, 0xd9, 0x9e, 0x8b, 0x29, 0x0b, 0x1d, 0x8e, 0x7e,
+ 0x04, 0x13, 0x91, 0xe3, 0x98, 0x8c, 0x50, 0x7d, 0xe5, 0xcd, 0x93, 0x95, 0x46, 0xda, 0x8a, 0x62,
+ 0xcc, 0x39, 0x75, 0xa6, 0x89, 0x08, 0xcf, 0x70, 0x2c, 0x15, 0xdd, 0x83, 0x69, 0xd7, 0xeb, 0xd0,
+ 0x1d, 0xea, 0x50, 0x8b, 0x7b, 0x81, 0x8c, 0x5e, 0x7d, 0x65, 0x29, 0xab, 0x45, 0xd4, 0x8a, 0xf0,
+ 0xff, 0x56, 0x86, 0xce, 0x9c, 0xef, 0xf7, 0x9a, 0xd3, 0x59, 0x08, 0xce, 0xc9, 0x31, 0x3e, 0xd5,
+ 0xa1, 0x6e, 0x12, 0x66, 0x5b, 0x91, 0x46, 0xf4, 0x53, 0x00, 0xc2, 0x79, 0x60, 0xef, 0x86, 0x5c,
+ 0x9e, 0x45, 0xc4, 0xfd, 0x9b, 0x27, 0x9f, 0x25, 0xc3, 0xde, 0x5a, 0x4d, 0x78, 0x37, 0x5c, 0x1e,
+ 0x1c, 0x9b, 0xaf, 0xc4, 0x19, 0x90, 0x22, 0x7e, 0xf6, 0xaf, 0xe6, 0xcc, 0xdd, 0x90, 0x38, 0xf6,
+ 0x9e, 0x4d, 0x3b, 0x5b, 0xa4, 0x4b, 0x71, 0x46, 0x23, 0x3a, 0x82, 0x49, 0x8b, 0xf8, 0xc4, 0xb2,
+ 0xf9, 0xf1, 0x42, 0x4d, 0x6a, 0x7f, 0xbb, 0xba, 0xf6, 0x35, 0xc5, 0x19, 0xe9, 0xbe, 0xa4, 0x74,
+ 0x4f, 0xc6, 0xe0, 0x41, 0xcd, 0x89, 0xae, 0x45, 0x07, 0xe6, 0x0a, 0xb6, 0xa3, 0x79, 0xd0, 0x0f,
+ 0xe9, 0x71, 0x54, 0x71, 0x58, 0xfc, 0x44, 0x6b, 0x30, 0x76, 0x44, 0x9c, 0x90, 0xca, 0xfa, 0xca,
+ 0x27, 0x6c, 0x79, 0x8c, 0x63, 0xa9, 0x38, 0xe2, 0xfd, 0x56, 0xed, 0x1d, 0x6d, 0xf1, 0x10, 0x66,
+ 0x72, 0xb6, 0x0e, 0xd1, 0xb5, 0x9e, 0xd7, 0xd5, 0x3a, 0xa9, 0xf6, 0x52, 0xe5, 0x77, 0x43, 0xe2,
+ 0x72, 0x9b, 0x1f, 0x67, 0x94, 0x19, 0x37, 0xe0, 0xdc, 0xda, 0xc6, 0x2d, 0xd5, 0x4f, 0x54, 0xdc,
+ 0xd1, 0x0a, 0x00, 0xfd, 0xc8, 0x0f, 0x28, 0x13, 0xb5, 0xa4, 0xba, 0x4a, 0x52, 0xae, 0x1b, 0x09,
+ 0x06, 0x67, 0xa8, 0x8c, 0x23, 0x50, 0x5d, 0x42, 0xf4, 0x19, 0x97, 0x74, 0xa9, 0xe2, 0x4b, 0x2a,
+ 0x51, 0xfa, 0x54, 0x62, 0xd0, 0x4d, 0x18, 0xdb, 0x15, 0x91, 0x51, 0xe6, 0x5f, 0xae, 0x1c, 0x44,
+ 0x73, 0xaa, 0xdf, 0x6b, 0x8e, 0x49, 0x00, 0x8e, 0x44, 0x18, 0x8f, 0x6a, 0xf0, 0xd5, 0x62, 0xc1,
+ 0xac, 0x79, 0xee, 0x9e, 0xbd, 0x1f, 0x06, 0xf2, 0x03, 0x7d, 0x07, 0xc6, 0x23, 0x91, 0xca, 0xa2,
+ 0xe5, 0xb8, 0xab, 0xed, 0x48, 0xe8, 0xb3, 0x5e, 0xf3, 0xa5, 0x22, 0x6b, 0x84, 0xc1, 0x8a, 0x0f,
+ 0x2d, 0xc3, 0x64, 0x40, 0x3f, 0x0c, 0x29, 0xe3, 0x4c, 0xe6, 0xdd, 0x94, 0x39, 0x2d, 0x52, 0x07,
+ 0x2b, 0x18, 0x4e, 0xb0, 0xe8, 0x63, 0x0d, 0xce, 0x47, 0x55, 0x99, 0xb3, 0x41, 0x55, 0xe4, 0xd5,
+ 0x2a, 0x39, 0x91, 0x63, 0x34, 0xbf, 0xa2, 0x8c, 0x3d, 0x3f, 0x04, 0x89, 0x87, 0xa9, 0x32, 0xfe,
+ 0xa3, 0xc1, 0x4b, 0xc3, 0x3b, 0x08, 0xda, 0x83, 0x89, 0x40, 0xfe, 0x8a, 0x8b, 0xf7, 0x7a, 0x15,
+ 0x83, 0xd4, 0x31, 0xcb, 0xfb, 0x51, 0xf4, 0xcd, 0x70, 0x2c, 0x1c, 0x59, 0x30, 0x6e, 0x49, 0x9b,
+ 0x54, 0x95, 0x5e, 0x3f, 0x5d, 0xbf, 0xcb, 0x7b, 0x20, 0x19, 0x42, 0x11, 0x18, 0x2b, 0xd1, 0xc6,
+ 0x6f, 0x35, 0x98, 0x2b, 0x54, 0x11, 0x6a, 0x80, 0x6e, 0xbb, 0x5c, 0xa6, 0x95, 0x1e, 0xc5, 0x68,
+ 0xd3, 0xe5, 0xf7, 0x44, 0xb2, 0x63, 0x81, 0x40, 0x97, 0x60, 0x74, 0x57, 0x8c, 0x40, 0x11, 0x8e,
+ 0x49, 0x73, 0xa6, 0xdf, 0x6b, 0x4e, 0x99, 0x9e, 0xe7, 0x44, 0x14, 0x12, 0x85, 0xbe, 0x01, 0xe3,
+ 0x8c, 0x07, 0xb6, 0xbb, 0xbf, 0x30, 0x2a, 0xb3, 0x45, 0xf6, 0xfb, 0x1d, 0x09, 0x89, 0xc8, 0x14,
+ 0x1a, 0xbd, 0x0a, 0x13, 0x47, 0x34, 0x90, 0x15, 0x32, 0x26, 0x29, 0x65, 0x37, 0xbd, 0x17, 0x81,
+ 0x22, 0xd2, 0x98, 0xc0, 0xf8, 0x7d, 0x0d, 0xea, 0x2a, 0x80, 0x0e, 0xb1, 0xbb, 0xe8, 0x7e, 0x26,
+ 0xa1, 0xa2, 0x48, 0xbc, 0x76, 0x8a, 0x48, 0x98, 0xf3, 0x71, 0xf3, 0x1a, 0x92, 0x81, 0x14, 0xea,
+ 0x96, 0xe7, 0x32, 0x1e, 0x10, 0xdb, 0x55, 0xe9, 0x9a, 0x6f, 0x10, 0x27, 0x25, 0x9e, 0x62, 0x33,
+ 0xcf, 0x2b, 0x05, 0xf5, 0x14, 0xc6, 0x70, 0x56, 0x2e, 0x7a, 0x90, 0x84, 0x58, 0x97, 0x1a, 0xde,
+ 0xaa, 0xa4, 0x41, 0x1c, 0xbe, 0x5a, 0x74, 0xff, 0xaa, 0xc1, 0x42, 0x19, 0x53, 0xae, 0x1e, 0xb5,
+ 0xcf, 0x54, 0x8f, 0xb5, 0xb3, 0xab, 0xc7, 0x3f, 0x69, 0x99, 0xd8, 0x33, 0x86, 0x7e, 0x0c, 0x93,
+ 0x62, 0x19, 0x92, 0xbb, 0x4d, 0xb4, 0x0e, 0xbc, 0x51, 0x6d, 0x75, 0xba, 0xb3, 0xfb, 0x13, 0x6a,
+ 0xf1, 0xdb, 0x94, 0x93, 0xb4, 0x19, 0xa7, 0x30, 0x9c, 0x48, 0x15, 0x9b, 0x13, 0xf3, 0xa9, 0x75,
+ 0x9a, 0x41, 0x24, 0x4d, 0xdb, 0xf1, 0xa9, 0x95, 0xf6, 0x6b, 0xf1, 0x85, 0xa5, 0x20, 0xe3, 0x97,
+ 0xd9, 0x60, 0x30, 0x96, 0x0f, 0x46, 0x99, 0x8b, 0xb5, 0xb3, 0x73, 0xf1, 0x1f, 0x93, 0x56, 0x20,
+ 0xed, 0xbb, 0x65, 0x33, 0x8e, 0x3e, 0x18, 0x70, 0x73, 0xab, 0x9a, 0x9b, 0x05, 0xb7, 0x74, 0x72,
+ 0x52, 0x65, 0x31, 0x24, 0xe3, 0xe2, 0x2d, 0x18, 0xb3, 0x39, 0xed, 0xc6, 0xf5, 0x75, 0xb9, 0xb2,
+ 0x8f, 0xcd, 0x19, 0x25, 0x75, 0x6c, 0x53, 0xf0, 0xe3, 0x48, 0x8c, 0xf1, 0x24, 0x7f, 0x02, 0xe1,
+ 0x7b, 0xf4, 0x43, 0x98, 0x62, 0x6a, 0x22, 0xc7, 0x5d, 0xe2, 0x4a, 0x15, 0x3d, 0xc9, 0x7a, 0x77,
+ 0x4e, 0xa9, 0x9a, 0x8a, 0x21, 0x0c, 0xa7, 0x12, 0x33, 0x15, 0x5c, 0x3b, 0x55, 0x05, 0x17, 0xe2,
+ 0x5f, 0x5a, 0xc1, 0x01, 0x0c, 0x0b, 0x20, 0xfa, 0x01, 0x8c, 0x7b, 0x3e, 0xf9, 0x30, 0xa4, 0x2a,
+ 0x2a, 0xcf, 0xd9, 0xe0, 0xee, 0x48, 0xda, 0x61, 0x69, 0x02, 0x42, 0x67, 0x84, 0xc6, 0x4a, 0xa4,
+ 0xf1, 0x48, 0x83, 0xf9, 0x62, 0x33, 0x3b, 0x45, 0xb7, 0xd8, 0x86, 0xd9, 0x2e, 0xe1, 0xd6, 0x41,
+ 0x32, 0x50, 0xd4, 0x5d, 0x69, 0xb9, 0xdf, 0x6b, 0xce, 0xde, 0xce, 0x61, 0x9e, 0xf5, 0x9a, 0xe8,
+ 0xdd, 0xd0, 0x71, 0x8e, 0xf3, 0x3b, 0x63, 0x81, 0xdf, 0xf8, 0xb9, 0x0e, 0x33, 0xb9, 0xde, 0x5d,
+ 0x61, 0x3b, 0x5a, 0x85, 0xb9, 0x4e, 0xea, 0x6c, 0x81, 0x50, 0x66, 0x7c, 0x59, 0x11, 0x67, 0x33,
+ 0x45, 0xf2, 0x15, 0xe9, 0xf3, 0xa9, 0xa3, 0x7f, 0xee, 0xa9, 0x73, 0x0f, 0x66, 0x49, 0x32, 0xad,
+ 0x6f, 0x7b, 0x1d, 0xaa, 0x66, 0x65, 0x4b, 0x71, 0xcd, 0xae, 0xe6, 0xb0, 0xcf, 0x7a, 0xcd, 0x2f,
+ 0x15, 0x67, 0xbc, 0x80, 0xe3, 0x82, 0x14, 0xf4, 0x0a, 0x8c, 0x59, 0x5e, 0xe8, 0x72, 0x39, 0x50,
+ 0xf5, 0xb4, 0x54, 0xd6, 0x04, 0x10, 0x47, 0x38, 0x74, 0x15, 0xea, 0xa4, 0xd3, 0xb5, 0xdd, 0x55,
+ 0xcb, 0xa2, 0x8c, 0xc9, 0x6b, 0xdc, 0x64, 0x34, 0xa5, 0x57, 0x53, 0x30, 0xce, 0xd2, 0x18, 0xff,
+ 0xd5, 0xe2, 0x1d, 0xb1, 0x64, 0x97, 0x41, 0x97, 0xc5, 0x66, 0x24, 0x51, 0x2a, 0x30, 0x99, 0xe5,
+ 0x46, 0x82, 0x71, 0x8c, 0xcf, 0x5c, 0xb7, 0x6b, 0x95, 0xae, 0xdb, 0x7a, 0x85, 0xeb, 0xf6, 0xe8,
+ 0x89, 0xd7, 0xed, 0xc2, 0x89, 0xc7, 0x2a, 0x9c, 0xf8, 0x03, 0x98, 0x2d, 0xec, 0xf4, 0x37, 0x41,
+ 0xb7, 0xa8, 0xa3, 0x8a, 0xee, 0x39, 0xb7, 0xde, 0x81, 0x1b, 0x81, 0x39, 0xd1, 0xef, 0x35, 0xf5,
+ 0xb5, 0x8d, 0x5b, 0x58, 0x08, 0x31, 0x7e, 0xa7, 0xc1, 0xb9, 0x81, 0x9b, 0x31, 0xba, 0x0e, 0x33,
+ 0xb6, 0xcb, 0x69, 0xb0, 0x47, 0x2c, 0xba, 0x95, 0xa6, 0xf8, 0x05, 0x75, 0xaa, 0x99, 0xcd, 0x2c,
+ 0x12, 0xe7, 0x69, 0xd1, 0x45, 0xd0, 0x6d, 0x3f, 0xde, 0xae, 0xa5, 0xb6, 0xcd, 0x6d, 0x86, 0x05,
+ 0x4c, 0xd4, 0xc3, 0x01, 0x09, 0x3a, 0x0f, 0x49, 0x40, 0x57, 0x3b, 0x1d, 0x71, 0xdf, 0x50, 0x3e,
+ 0x4d, 0xea, 0xe1, 0xbb, 0x79, 0x34, 0x2e, 0xd2, 0x1b, 0xbf, 0xd1, 0xe0, 0x62, 0x69, 0x27, 0xa9,
+ 0xfc, 0x80, 0x42, 0x00, 0x7c, 0x12, 0x90, 0x2e, 0xe5, 0x34, 0x60, 0x43, 0xa6, 0x6b, 0x85, 0x77,
+ 0x89, 0x64, 0x70, 0x6f, 0x27, 0x82, 0x70, 0x46, 0xa8, 0xf1, 0xab, 0x1a, 0xcc, 0x60, 0x15, 0x8f,
+ 0x68, 0x55, 0xfc, 0xff, 0xaf, 0x0b, 0x77, 0x73, 0xeb, 0xc2, 0x73, 0x52, 0x23, 0x67, 0x5c, 0xd9,
+ 0xc2, 0x80, 0xee, 0x8b, 0x25, 0x9a, 0xf0, 0x90, 0x55, 0xbb, 0xf8, 0xe4, 0x85, 0x4a, 0xc6, 0x34,
+ 0x08, 0xd1, 0x37, 0x56, 0x02, 0x8d, 0xbe, 0x06, 0x8d, 0x1c, 0xbd, 0xe8, 0xf4, 0x61, 0x97, 0x06,
+ 0x98, 0xee, 0xd1, 0x80, 0xba, 0x16, 0x45, 0x57, 0x60, 0x92, 0xf8, 0xf6, 0x8d, 0xc0, 0x0b, 0x7d,
+ 0x15, 0xd1, 0x64, 0x94, 0xaf, 0x6e, 0x6f, 0x4a, 0x38, 0x4e, 0x28, 0x04, 0x75, 0x6c, 0x91, 0xca,
+ 0xab, 0xcc, 0x7a, 0x1d, 0xc1, 0x71, 0x42, 0x91, 0xb4, 0xef, 0xd1, 0xd2, 0xf6, 0x6d, 0x82, 0x1e,
+ 0xda, 0x1d, 0x75, 0x27, 0x78, 0x43, 0x11, 0xe8, 0xef, 0x6f, 0xae, 0x3f, 0xeb, 0x35, 0x2f, 0x95,
+ 0x3d, 0xfe, 0xf1, 0x63, 0x9f, 0xb2, 0xd6, 0xfb, 0x9b, 0xeb, 0x58, 0x30, 0x1b, 0x7f, 0xd6, 0xe0,
+ 0x5c, 0xee, 0x90, 0x67, 0xb0, 0xd2, 0x6c, 0xe7, 0x57, 0x9a, 0xd7, 0x4e, 0x11, 0xb2, 0x92, 0xa5,
+ 0xc6, 0x2e, 0x1c, 0x42, 0x6e, 0x35, 0xef, 0x15, 0x1f, 0xc3, 0x2e, 0x57, 0xbe, 0x39, 0x94, 0xbf,
+ 0x80, 0x19, 0x7f, 0xab, 0xc1, 0xf9, 0x21, 0x59, 0x84, 0x1e, 0x00, 0xa4, 0x33, 0x66, 0x88, 0xd3,
+ 0x86, 0x28, 0x1c, 0xb8, 0xe7, 0xce, 0xca, 0x27, 0xaa, 0x14, 0x9a, 0x91, 0x88, 0x18, 0xd4, 0x03,
+ 0xca, 0x68, 0x70, 0x44, 0x3b, 0xef, 0x7a, 0x81, 0x72, 0xdd, 0xb7, 0x4f, 0xe1, 0xba, 0x81, 0xec,
+ 0x4d, 0xef, 0x5e, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xd0, 0x83, 0xd4, 0x85, 0xd1, 0xdb, 0xeb, 0xb5,
+ 0x4a, 0x27, 0xca, 0x3f, 0x1b, 0x9f, 0xe0, 0xcc, 0x7f, 0x6a, 0x70, 0x21, 0x67, 0xe4, 0x7b, 0xb4,
+ 0xeb, 0x3b, 0x84, 0xd3, 0x33, 0x68, 0x46, 0xf7, 0x73, 0xcd, 0xe8, 0xed, 0x53, 0x78, 0x32, 0x36,
+ 0xb2, 0xf4, 0x16, 0xf3, 0x0f, 0x0d, 0x2e, 0x0e, 0xe5, 0x38, 0x83, 0xe2, 0xfa, 0x5e, 0xbe, 0xb8,
+ 0xae, 0x7d, 0x86, 0x73, 0x95, 0xdf, 0x1c, 0x2e, 0x96, 0xfa, 0xe1, 0x0b, 0x39, 0x3d, 0x8c, 0x3f,
+ 0x68, 0x30, 0x1d, 0x53, 0x8a, 0x75, 0xa9, 0xc2, 0xce, 0xbc, 0x02, 0xa0, 0xfe, 0x30, 0x89, 0x6f,
+ 0xf7, 0x7a, 0x6a, 0xf7, 0x8d, 0x04, 0x83, 0x33, 0x54, 0xe8, 0x26, 0xa0, 0xd8, 0xc2, 0x1d, 0x47,
+ 0x2e, 0x05, 0x62, 0xf5, 0xd4, 0x25, 0xef, 0xa2, 0xe2, 0x45, 0x78, 0x80, 0x02, 0x0f, 0xe1, 0x32,
+ 0xfe, 0xa2, 0xa5, 0x73, 0x5b, 0x82, 0x5f, 0x54, 0xcf, 0x4b, 0xe3, 0x4a, 0x3d, 0x9f, 0x9d, 0x3b,
+ 0x92, 0xf2, 0x85, 0x9d, 0x3b, 0xd2, 0xba, 0x92, 0x92, 0x78, 0xa4, 0x17, 0x4e, 0x21, 0x4b, 0xa1,
+ 0xea, 0x96, 0x77, 0x2b, 0xf3, 0x37, 0x59, 0x7d, 0xe5, 0xd5, 0x6a, 0xe6, 0x88, 0x34, 0x1d, 0xba,
+ 0xe3, 0x5f, 0x81, 0x49, 0xd7, 0xeb, 0x44, 0xfb, 0x70, 0x61, 0xbb, 0xd8, 0x52, 0x70, 0x9c, 0x50,
+ 0x0c, 0xfc, 0x91, 0x33, 0xfa, 0xf9, 0xfc, 0x91, 0x23, 0x37, 0x22, 0xc7, 0x11, 0x04, 0xf1, 0xf5,
+ 0x21, 0xdd, 0x88, 0x14, 0x1c, 0x27, 0x14, 0xe8, 0x4e, 0x3a, 0x5f, 0xc6, 0x65, 0x4c, 0xbe, 0x56,
+ 0x65, 0x44, 0x97, 0x0f, 0x14, 0xd3, 0x7c, 0xfc, 0xb4, 0x31, 0xf2, 0xe4, 0x69, 0x63, 0xe4, 0x93,
+ 0xa7, 0x8d, 0x91, 0x8f, 0xfb, 0x0d, 0xed, 0x71, 0xbf, 0xa1, 0x3d, 0xe9, 0x37, 0xb4, 0x4f, 0xfa,
+ 0x0d, 0xed, 0xd3, 0x7e, 0x43, 0xfb, 0xc5, 0xbf, 0x1b, 0x23, 0xdf, 0x7f, 0xf9, 0xa4, 0x7f, 0x95,
+ 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x60, 0x85, 0x64, 0x74, 0x1e, 0x00, 0x00,
+}
+
+func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NetworkData != nil {
+ {
+ size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.Device)
+ copy(dAtA[i:], m.Device)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Pool)
+ copy(dAtA[i:], m.Pool)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
@@ -1256,11 +1240,6 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- i -= len(m.Controller)
- copy(dAtA[i:], m.Controller)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
- i--
- dAtA[i] = 0x22
if m.NodeSelector != nil {
{
size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
@@ -1835,18 +1814,6 @@ func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- if m.SuitableNodes != nil {
- {
- size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
if len(m.Config) > 0 {
for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -1972,14 +1939,16 @@ func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- i--
- if m.AdminAccess {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if m.AdminAccess != nil {
+ i--
+ if *m.AdminAccess {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
}
- i--
- dAtA[i] = 0x30
i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
i--
dAtA[i] = 0x28
@@ -2035,6 +2004,16 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.AdminAccess != nil {
+ i--
+ if *m.AdminAccess {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
i -= len(m.Device)
copy(dAtA[i:], m.Device)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
@@ -2093,7 +2072,7 @@ func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2103,88 +2082,39 @@ func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- i -= len(m.Driver)
- copy(dAtA[i:], m.Driver)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) {
+func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ i -= len(m.HardwareAddress)
+ copy(dAtA[i:], m.HardwareAddress)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
i--
dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if len(m.IPs) > 0 {
+ for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.IPs[iNdEx])
+ copy(dAtA[i:], m.IPs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
+ i--
+ dAtA[i] = 0x12
}
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i -= len(m.InterfaceName)
+ copy(dAtA[i:], m.InterfaceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) {
+func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2194,32 +2124,18 @@ func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) {
+func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
{
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2227,84 +2143,15 @@ func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.PotentialNodes) > 0 {
- for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PotentialNodes[iNdEx])
- copy(dAtA[i:], m.PotentialNodes[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.SelectedNode)
- copy(dAtA[i:], m.SelectedNode)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode)))
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.ResourceClaims) > 0 {
- for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2448,43 +2295,6 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.UnsuitableNodes) > 0 {
- for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.UnsuitableNodes[iNdEx])
- copy(dAtA[i:], m.UnsuitableNodes[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2505,11 +2315,6 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- i -= len(m.Controller)
- copy(dAtA[i:], m.Controller)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
- i--
- dAtA[i] = 0x12
{
size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -2543,14 +2348,20 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- i--
- if m.DeallocationRequested {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
+ if len(m.Devices) > 0 {
+ for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
}
- i--
- dAtA[i] = 0x18
if len(m.ReservedFor) > 0 {
for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -2925,6 +2736,33 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *AllocatedDeviceStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NetworkData != nil {
+ l = m.NetworkData.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func (m *AllocationResult) Size() (n int) {
if m == nil {
return 0
@@ -2937,8 +2775,6 @@ func (m *AllocationResult) Size() (n int) {
l = m.NodeSelector.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- l = len(m.Controller)
- n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -3161,10 +2997,6 @@ func (m *DeviceClassSpec) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
- if m.SuitableNodes != nil {
- l = m.SuitableNodes.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
return n
}
@@ -3219,7 +3051,9 @@ func (m *DeviceRequest) Size() (n int) {
l = len(m.AllocationMode)
n += 1 + l + sovGenerated(uint64(l))
n += 1 + sovGenerated(uint64(m.Count))
- n += 2
+ if m.AdminAccess != nil {
+ n += 2
+ }
return n
}
@@ -3237,6 +3071,9 @@ func (m *DeviceRequestAllocationResult) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Device)
n += 1 + l + sovGenerated(uint64(l))
+ if m.AdminAccess != nil {
+ n += 2
+ }
return n
}
@@ -3253,6 +3090,25 @@ func (m *DeviceSelector) Size() (n int) {
return n
}
+func (m *NetworkDeviceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.InterfaceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.IPs) > 0 {
+ for _, s := range m.IPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.HardwareAddress)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *OpaqueDeviceConfiguration) Size() (n int) {
if m == nil {
return 0
@@ -3266,7 +3122,7 @@ func (m *OpaqueDeviceConfiguration) Size() (n int) {
return n
}
-func (m *PodSchedulingContext) Size() (n int) {
+func (m *ResourceClaim) Size() (n int) {
if m == nil {
return 0
}
@@ -3281,7 +3137,24 @@ func (m *PodSchedulingContext) Size() (n int) {
return n
}
-func (m *PodSchedulingContextList) Size() (n int) {
+func (m *ResourceClaimConsumerReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimList) Size() (n int) {
if m == nil {
return 0
}
@@ -3298,113 +3171,13 @@ func (m *PodSchedulingContextList) Size() (n int) {
return n
}
-func (m *PodSchedulingContextSpec) Size() (n int) {
+func (m *ResourceClaimSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.SelectedNode)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.PotentialNodes) > 0 {
- for _, s := range m.PotentialNodes {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *PodSchedulingContextStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.ResourceClaims) > 0 {
- for _, e := range m.ResourceClaims {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaim) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimConsumerReference) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.APIGroup)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Resource)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.UID)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *ResourceClaimList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaimSchedulingStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.UnsuitableNodes) > 0 {
- for _, s := range m.UnsuitableNodes {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ResourceClaimSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Devices.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Controller)
+ l = m.Devices.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -3425,7 +3198,12 @@ func (m *ResourceClaimStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
- n += 2
+ if len(m.Devices) > 0 {
+ for _, e := range m.Devices {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -3547,14 +3325,33 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *AllocatedDeviceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&AllocatedDeviceStatus{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
+ `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *AllocationResult) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&AllocationResult{`,
`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
- `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`,
- `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
`}`,
}, "")
return s
@@ -3700,7 +3497,7 @@ func (this *DeviceClass) String() string {
return "nil"
}
s := strings.Join([]string{`&DeviceClass{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
@@ -3726,7 +3523,7 @@ func (this *DeviceClassList) String() string {
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&DeviceClassList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
@@ -3749,7 +3546,6 @@ func (this *DeviceClassSpec) String() string {
s := strings.Join([]string{`&DeviceClassSpec{`,
`Selectors:` + repeatedStringForSelectors + `,`,
`Config:` + repeatedStringForConfig + `,`,
- `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`,
`}`,
}, "")
return s
@@ -3790,7 +3586,7 @@ func (this *DeviceRequest) String() string {
`Selectors:` + repeatedStringForSelectors + `,`,
`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
- `AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`,
+ `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
`}`,
}, "")
return s
@@ -3804,6 +3600,7 @@ func (this *DeviceRequestAllocationResult) String() string {
`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+ `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
`}`,
}, "")
return s
@@ -3818,67 +3615,25 @@ func (this *DeviceSelector) String() string {
}, "")
return s
}
-func (this *OpaqueDeviceConfiguration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
- `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
- `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodSchedulingContext) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PodSchedulingContext{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodSchedulingContextList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]PodSchedulingContext{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&PodSchedulingContextList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodSchedulingContextSpec) String() string {
+func (this *NetworkDeviceData) String() string {
if this == nil {
return "nil"
}
- s := strings.Join([]string{`&PodSchedulingContextSpec{`,
- `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`,
- `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`,
+ s := strings.Join([]string{`&NetworkDeviceData{`,
+ `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`,
+ `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`,
+ `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`,
`}`,
}, "")
return s
}
-func (this *PodSchedulingContextStatus) String() string {
+func (this *OpaqueDeviceConfiguration) String() string {
if this == nil {
return "nil"
}
- repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{"
- for _, f := range this.ResourceClaims {
- repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + ","
- }
- repeatedStringForResourceClaims += "}"
- s := strings.Join([]string{`&PodSchedulingContextStatus{`,
- `ResourceClaims:` + repeatedStringForResourceClaims + `,`,
+ s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -3888,7 +3643,7 @@ func (this *ResourceClaim) String() string {
return "nil"
}
s := strings.Join([]string{`&ResourceClaim{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
`}`,
@@ -3918,30 +3673,18 @@ func (this *ResourceClaimList) String() string {
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&ResourceClaimList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
return s
}
-func (this *ResourceClaimSchedulingStatus) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`,
- `}`,
- }, "")
- return s
-}
func (this *ResourceClaimSpec) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ResourceClaimSpec{`,
`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
- `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
`}`,
}, "")
return s
@@ -3955,10 +3698,15 @@ func (this *ResourceClaimStatus) String() string {
repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
}
repeatedStringForReservedFor += "}"
+ repeatedStringForDevices := "[]AllocatedDeviceStatus{"
+ for _, f := range this.Devices {
+ repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDevices += "}"
s := strings.Join([]string{`&ResourceClaimStatus{`,
`Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
`ReservedFor:` + repeatedStringForReservedFor + `,`,
- `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`,
+ `Devices:` + repeatedStringForDevices + `,`,
`}`,
}, "")
return s
@@ -3968,7 +3716,7 @@ func (this *ResourceClaimTemplate) String() string {
return "nil"
}
s := strings.Join([]string{`&ResourceClaimTemplate{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
@@ -3984,7 +3732,7 @@ func (this *ResourceClaimTemplateList) String() string {
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&ResourceClaimTemplateList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
@@ -3995,7 +3743,7 @@ func (this *ResourceClaimTemplateSpec) String() string {
return "nil"
}
s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
@@ -4018,7 +3766,7 @@ func (this *ResourceSlice) String() string {
return "nil"
}
s := strings.Join([]string{`&ResourceSlice{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
@@ -4034,7 +3782,7 @@ func (this *ResourceSliceList) String() string {
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&ResourceSliceList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`Items:` + repeatedStringForItems + `,`,
`}`,
}, "")
@@ -4053,7 +3801,7 @@ func (this *ResourceSliceSpec) String() string {
`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
`Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
- `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
`AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`,
`Devices:` + repeatedStringForDevices + `,`,
`}`,
@@ -4068,7 +3816,7 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
-func (m *AllocationResult) Unmarshal(dAtA []byte) error {
+func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4091,17 +3839,17 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
+ return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4111,30 +3859,29 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Driver = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 3:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4144,31 +3891,27 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NodeSelector == nil {
- m.NodeSelector = &v1.NodeSelector{}
- }
- if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Pool = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 4:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4196,61 +3939,11 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Controller = string(dAtA[iNdEx:postIndex])
+ m.Device = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *BasicDevice) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4277,109 +3970,14 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Attributes == nil {
- m.Attributes = make(map[QualifiedName]DeviceAttribute)
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- var mapkey QualifiedName
- mapvalue := &DeviceAttribute{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &DeviceAttribute{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Attributes[QualifiedName(mapkey)] = *mapvalue
iNdEx = postIndex
- case 2:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4406,161 +4004,15 @@ func (m *BasicDevice) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Capacity == nil {
- m.Capacity = make(map[QualifiedName]resource.Quantity)
- }
- var mapkey QualifiedName
- mapvalue := &resource.Quantity{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Capacity[QualifiedName(mapkey)] = *mapvalue
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ iNdEx = postIndex
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4570,23 +4022,27 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Expression = string(dAtA[iNdEx:postIndex])
+ if m.NetworkData == nil {
+ m.NetworkData = &NetworkDeviceData{}
+ }
+ if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4609,7 +4065,7 @@ func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *Device) Unmarshal(dAtA []byte) error {
+func (m *AllocationResult) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4632,17 +4088,17 @@ func (m *Device) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: Device: wiretype end group for non-group")
+ return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4652,27 +4108,28 @@ func (m *Device) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4699,10 +4156,10 @@ func (m *Device) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Basic == nil {
- m.Basic = &BasicDevice{}
+ if m.NodeSelector == nil {
+ m.NodeSelector = &v11.NodeSelector{}
}
- if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -4727,7 +4184,7 @@ func (m *Device) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
+func (m *BasicDevice) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4750,17 +4207,17 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4770,59 +4227,124 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Attributes == nil {
+ m.Attributes = make(map[QualifiedName]DeviceAttribute)
+ }
+ var mapkey QualifiedName
+ mapvalue := &DeviceAttribute{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &DeviceAttribute{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Attributes[QualifiedName(mapkey)] = *mapvalue
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4849,9 +4371,105 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ if m.Capacity == nil {
+ m.Capacity = make(map[QualifiedName]resource.Quantity)
+ }
+ var mapkey QualifiedName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.Capacity[QualifiedName(mapkey)] = *mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4874,7 +4492,7 @@ func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
+func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4897,51 +4515,17 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
+ return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Results = append(m.Results, DeviceRequestAllocationResult{})
- if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -4951,25 +4535,23 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Config = append(m.Config, DeviceAllocationConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Expression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4992,7 +4574,7 @@ func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
+func (m *Device) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5012,59 +4594,18 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
break
}
}
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IntValue = &v
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.BoolValue = &b
- case 4:
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Device: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5092,14 +4633,13 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.StringValue = &s
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5109,24 +4649,27 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.VersionValue = &s
+ if m.Basic == nil {
+ m.Basic = &BasicDevice{}
+ }
+ if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5149,7 +4692,7 @@ func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
+func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5172,17 +4715,17 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5192,31 +4735,29 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, DeviceRequest{})
- if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5226,29 +4767,27 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Constraints = append(m.Constraints, DeviceConstraint{})
- if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5275,8 +4814,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Config = append(m.Config, DeviceClaimConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5301,7 +4839,7 @@ func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5324,17 +4862,17 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5344,27 +4882,29 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ m.Results = append(m.Results, DeviceRequestAllocationResult{})
+ if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5391,7 +4931,8 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Config = append(m.Config, DeviceAllocationConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5416,7 +4957,7 @@ func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClass) Unmarshal(dAtA []byte) error {
+func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5439,17 +4980,58 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
- case 1:
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IntValue = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.BoolValue = &b
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5459,30 +5041,30 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.StringValue = &s
iNdEx = postIndex
- case 2:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5492,24 +5074,24 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.VersionValue = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5532,7 +5114,7 @@ func (m *DeviceClass) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5542,28 +5124,96 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
if shift >= 64 {
return ErrIntOverflowGenerated
}
- if iNdEx >= l {
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, DeviceRequest{})
+ if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ m.Constraints = append(m.Constraints, DeviceConstraint{})
+ if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ iNdEx = postIndex
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5590,7 +5240,8 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Config = append(m.Config, DeviceClaimConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5615,7 +5266,7 @@ func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
+func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5638,17 +5289,17 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -5658,28 +5309,27 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5706,8 +5356,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, DeviceClass{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5732,7 +5381,7 @@ func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
+func (m *DeviceClass) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5755,15 +5404,15 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5790,48 +5439,13 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Config = append(m.Config, DeviceClassConfiguration{})
- if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5858,10 +5472,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.SuitableNodes == nil {
- m.SuitableNodes = &v1.NodeSelector{}
- }
- if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5886,7 +5497,7 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
+func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5909,15 +5520,15 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5944,10 +5555,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Opaque == nil {
- m.Opaque = &OpaqueDeviceConfiguration{}
- }
- if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5972,7 +5580,7 @@ func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
+func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5995,17 +5603,17 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6015,29 +5623,30 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6047,24 +5656,25 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := FullyQualifiedName(dAtA[iNdEx:postIndex])
- m.MatchAttribute = &s
+ m.Items = append(m.Items, DeviceClass{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -6085,102 +5695,38 @@ func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
if iNdEx > l {
return io.ErrUnexpectedEOF
}
- return nil
-}
-func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+ return nil
+}
+func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
}
- if postIndex > l {
+ if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- m.DeviceClassName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
@@ -6214,11 +5760,11 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 4:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6228,63 +5774,26 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Count |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ m.Config = append(m.Config, DeviceClassConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.AdminAccess = bool(v != 0)
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6306,7 +5815,7 @@ func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
+func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6329,17 +5838,17 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6349,59 +5858,81 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Request = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ if m.Opaque == nil {
+ m.Opaque = &OpaqueDeviceConfiguration{}
}
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
}
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
- if postIndex > l {
+ if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.Driver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6429,11 +5960,11 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Pool = string(dAtA[iNdEx:postIndex])
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
- case 4:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6461,7 +5992,8 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Device = string(dAtA[iNdEx:postIndex])
+ s := FullyQualifiedName(dAtA[iNdEx:postIndex])
+ m.MatchAttribute = &s
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -6484,7 +6016,7 @@ func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
+func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6507,15 +6039,79 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6542,66 +6138,14 @@ func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.CEL == nil {
- m.CEL = &CELDeviceSelector{}
- }
- if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6629,13 +6173,13 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Driver = string(dAtA[iNdEx:postIndex])
+ m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
- var msglen int
+ m.Count = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6645,25 +6189,32 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.Count |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
}
- if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- iNdEx = postIndex
+ b := bool(v != 0)
+ m.AdminAccess = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6685,7 +6236,7 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
+func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6708,17 +6259,17 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6728,30 +6279,29 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Request = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6761,30 +6311,29 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Driver = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6794,25 +6343,77 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.Pool = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Device = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AdminAccess = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6834,7 +6435,7 @@ func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6847,58 +6448,25 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
- iNdEx = postIndex
- case 2:
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6925,8 +6493,10 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, PodSchedulingContext{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.CEL == nil {
+ m.CEL = &CELDeviceSelector{}
+ }
+ if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -6951,7 +6521,7 @@ func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
+func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6974,15 +6544,15 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -7010,11 +6580,43 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.SelectedNode = string(dAtA[iNdEx:postIndex])
+ m.InterfaceName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -7042,7 +6644,7 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex]))
+ m.HardwareAddress = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -7065,7 +6667,7 @@ func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
+func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -7088,15 +6690,47 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -7123,8 +6757,7 @@ func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{})
- if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -7593,120 +7226,6 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -7769,38 +7288,6 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Controller = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7921,11 +7408,11 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
}
- var v int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -7935,12 +7422,26 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.DeallocationRequested = bool(v != 0)
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Devices = append(m.Devices, AllocatedDeviceStatus{})
+ if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -8820,7 +8321,7 @@ func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.NodeSelector == nil {
- m.NodeSelector = &v1.NodeSelector{}
+ m.NodeSelector = &v11.NodeSelector{}
}
if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
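
For reference, the regenerated Unmarshal methods above all follow the same hand-rolled protobuf wire-format pattern: decode a varint, bounds-check the resulting length, then slice the payload out of the buffer. A minimal standalone sketch of that pattern for one length-delimited string field (not part of the vendored code; the helper name decodeStringField is illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeStringField decodes one length-delimited value starting at iNdEx,
// mirroring the "stringLen / intStringLen / postIndex" steps in the diff above.
// It returns the decoded string and the index of the next unread byte.
func decodeStringField(dAtA []byte, iNdEx int) (string, int, error) {
	l := len(dAtA)
	var stringLen uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return "", 0, errors.New("integer overflow in varint")
		}
		if iNdEx >= l {
			return "", 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		stringLen |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	intStringLen := int(stringLen)
	if intStringLen < 0 {
		return "", 0, errors.New("invalid negative length")
	}
	postIndex := iNdEx + intStringLen
	if postIndex < 0 || postIndex > l {
		return "", 0, io.ErrUnexpectedEOF
	}
	return string(dAtA[iNdEx:postIndex]), postIndex, nil
}

func main() {
	// 0x03 is the varint length prefix for the 3-byte payload "foo";
	// the field tag byte is assumed to have been consumed already.
	buf := []byte{0x03, 'f', 'o', 'o'}
	s, next, err := decodeStringField(buf, 0)
	fmt.Println(s, next, err) // foo 4 <nil>
}
```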
diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index b4428ad45..e802a0143 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -30,6 +30,56 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/resource/v1alpha3";
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+message AllocatedDeviceStatus {
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 1;
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ optional string pool = 2;
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ optional string device = 3;
+
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
+
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
+
+ // NetworkData contains network-related information specific to the device.
+ //
+ // +optional
+ optional NetworkDeviceData networkData = 6;
+}
+
// AllocationResult contains attributes of an allocated resource.
message AllocationResult {
// Devices is the result of allocating devices.
@@ -42,22 +92,6 @@ message AllocationResult {
//
// +optional
optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
-
- // Controller is the name of the DRA driver which handled the
- // allocation. That driver is also responsible for deallocating the
- // claim. It is empty when the claim can be deallocated without
- // involving a driver.
- //
- // A driver may allocate devices provided by other drivers, so this
- // driver name here can be different from the driver names listed for
- // the results.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- optional string controller = 4;
}
// BasicDevice defines one device instance.
@@ -128,6 +162,10 @@ message CELDeviceSelector {
//
// cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
//
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
+ //
// +required
optional string expression = 1;
}
@@ -309,22 +347,6 @@ message DeviceClassSpec {
// +optional
// +listType=atomic
repeated DeviceClassConfiguration config = 2;
-
- // Only nodes matching the selector will be considered by the scheduler
- // when trying to find a Node that fits a Pod when that Pod uses
- // a claim that has not been allocated yet *and* that claim
- // gets allocated through a control plane controller. It is ignored
- // when the claim does not use a control plane controller
- // for allocation.
- //
- // Setting this field is optional. If unset, all Nodes are candidates.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3;
}
// DeviceConfiguration must have exactly one field set. It gets embedded
@@ -443,8 +465,12 @@ message DeviceRequest {
// all ordinary claims to the device with respect to access modes and
// any resource allocations.
//
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
// +optional
- // +default=false
+ // +featureGate=DRAAdminAccess
optional bool adminAccess = 6;
}
@@ -481,6 +507,18 @@ message DeviceRequestAllocationResult {
//
// +required
optional string device = 4;
+
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ optional bool adminAccess = 5;
}
// DeviceSelector must have exactly one field set.
@@ -492,6 +530,37 @@ message DeviceSelector {
optional CELDeviceSelector cel = 1;
}
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
+message NetworkDeviceData {
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ //
+ // +optional
+ optional string interfaceName = 1;
+
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string ips = 2;
+
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ //
+ // +optional
+ optional string hardwareAddress = 3;
+}
+
// OpaqueDeviceConfiguration contains configuration parameters for a driver
// in a format defined by the driver vendor.
message OpaqueDeviceConfiguration {
@@ -512,73 +581,12 @@ message OpaqueDeviceConfiguration {
// includes self-identification and a version ("kind" + "apiVersion" for
// Kubernetes types), with conversion between different versions.
//
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
// +required
optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2;
}
-// PodSchedulingContext objects hold information that is needed to schedule
-// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
-// mode.
-//
-// This is an alpha type and requires enabling the DRAControlPlaneController
-// feature gate.
-message PodSchedulingContext {
- // Standard object metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // Spec describes where resources for the Pod are needed.
- optional PodSchedulingContextSpec spec = 2;
-
- // Status describes where resources for the Pod can be allocated.
- //
- // +optional
- optional PodSchedulingContextStatus status = 3;
-}
-
-// PodSchedulingContextList is a collection of Pod scheduling objects.
-message PodSchedulingContextList {
- // Standard list metadata
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
- // Items is the list of PodSchedulingContext objects.
- repeated PodSchedulingContext items = 2;
-}
-
-// PodSchedulingContextSpec describes where resources for the Pod are needed.
-message PodSchedulingContextSpec {
- // SelectedNode is the node for which allocation of ResourceClaims that
- // are referenced by the Pod and that use "WaitForFirstConsumer"
- // allocation is to be attempted.
- //
- // +optional
- optional string selectedNode = 1;
-
- // PotentialNodes lists nodes where the Pod might be able to run.
- //
- // The size of this field is limited to 128. This is large enough for
- // many clusters. Larger clusters may need more attempts to find a node
- // that suits all pending resources. This may get increased in the
- // future, but not reduced.
- //
- // +optional
- // +listType=atomic
- repeated string potentialNodes = 2;
-}
-
-// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
-message PodSchedulingContextStatus {
- // ResourceClaims describes resource availability for each
- // pod.spec.resourceClaim entry where the corresponding ResourceClaim
- // uses "WaitForFirstConsumer" allocation mode.
- //
- // +listType=map
- // +listMapKey=name
- // +optional
- repeated ResourceClaimSchedulingStatus resourceClaims = 1;
-}
-
// ResourceClaim describes a request for access to resources in the cluster,
// for use by workloads. For example, if a workload needs an accelerator device
// with specific properties, this is how that request is expressed. The status
@@ -634,46 +642,12 @@ message ResourceClaimList {
repeated ResourceClaim items = 2;
}
-// ResourceClaimSchedulingStatus contains information about one particular
-// ResourceClaim with "WaitForFirstConsumer" allocation mode.
-message ResourceClaimSchedulingStatus {
- // Name matches the pod.spec.resourceClaims[*].Name field.
- //
- // +required
- optional string name = 1;
-
- // UnsuitableNodes lists nodes that the ResourceClaim cannot be
- // allocated for.
- //
- // The size of this field is limited to 128, the same as for
- // PodSchedulingSpec.PotentialNodes. This may get increased in the
- // future, but not reduced.
- //
- // +optional
- // +listType=atomic
- repeated string unsuitableNodes = 2;
-}
-
// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
message ResourceClaimSpec {
// Devices defines how to request devices.
//
// +optional
optional DeviceClaim devices = 1;
-
- // Controller is the name of the DRA driver that is meant
- // to handle allocation of this claim. If empty, allocation is handled
- // by the scheduler while scheduling a pod.
- //
- // Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- optional string controller = 2;
}
// ResourceClaimStatus tracks whether the resource has been allocated and what
@@ -701,7 +675,7 @@ message ResourceClaimStatus {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
- // There can be at most 32 such reservations. This may get increased in
+ // There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional
@@ -711,19 +685,17 @@ message ResourceClaimStatus {
// +patchMergeKey=uid
repeated ResourceClaimConsumerReference reservedFor = 2;
- // Indicates that a claim is to be deallocated. While this is set,
- // no new consumers may be added to ReservedFor.
- //
- // This is only used if the claim needs to be deallocated by a DRA driver.
- // That driver then must deallocate this claim and reset the field
- // together with clearing the Allocation field.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
//
// +optional
- // +featureGate=DRAControlPlaneController
- optional bool deallocationRequested = 3;
+ // +listType=map
+ // +listMapKey=driver
+ // +listMapKey=device
+ // +listMapKey=pool
+ // +featureGate=DRAResourceClaimDeviceStatus
+ repeated AllocatedDeviceStatus devices = 4;
}
// ResourceClaimTemplate is used to produce ResourceClaim objects.
@@ -755,7 +727,7 @@ message ResourceClaimTemplateList {
// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
message ResourceClaimTemplateSpec {
- // ObjectMeta may contain labels and annotations that will be copied into the PVC
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
// when creating it. No other fields are allowed and will be rejected during
// validation.
// +optional
diff --git a/vendor/k8s.io/api/resource/v1alpha3/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go
index 74044e8cf..8573758e3 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/register.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/register.go
@@ -50,8 +50,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimList{},
&ResourceClaimTemplate{},
&ResourceClaimTemplateList{},
- &PodSchedulingContext{},
- &PodSchedulingContextList{},
&ResourceSlice{},
&ResourceSliceList{},
)
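
The two registrations removed above mean a runtime.Scheme built from this package no longer knows the PodSchedulingContext kinds. A quick standalone sketch (not part of this patch, assuming the vendored v1alpha3 package) that lists what remains registered:

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := resourcev1alpha3.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Prints ResourceClaim, ResourceSlice, DeviceClass, ... but no
	// PodSchedulingContext or PodSchedulingContextList.
	for kind := range scheme.KnownTypes(resourcev1alpha3.SchemeGroupVersion) {
		fmt.Println(kind)
	}
}
```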
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go
index 4efd2491d..49d7c86de 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types.go
@@ -37,6 +37,7 @@ const (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSlice
// ResourceSlice represents one or more resources in a pool of similar resources,
// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
@@ -144,6 +145,10 @@ type ResourceSliceSpec struct {
Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
}
+// DriverNameMaxLength is the maximum valid length of a driver name in the
+// ResourceSliceSpec and other places. It's the same as for CSI driver names.
+const DriverNameMaxLength = 63
+
// ResourcePool describes the pool that ResourceSlices belong to.
type ResourcePool struct {
// Name is used to identify the pool. For node-local devices, this
@@ -220,7 +225,7 @@ type BasicDevice struct {
Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"`
}
-// Limit for the sum of the number of entries in both ResourceSlices.
+// Limit for the sum of the number of entries in both attributes and capacity.
const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
// QualifiedName is the name of a device attribute or capacity.
@@ -244,6 +249,9 @@ type QualifiedName string
// FullyQualifiedName is a QualifiedName where the domain is set.
type FullyQualifiedName string
+// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name.
+const DeviceMaxDomainLength = 63
+
// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`<domain>/<ID>`).
const DeviceMaxIDLength = 32
@@ -284,6 +292,7 @@ const DeviceAttributeMaxValueLength = 64
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSliceList
// ResourceSliceList is a collection of ResourceSlices.
type ResourceSliceList struct {
@@ -298,7 +307,8 @@ type ResourceSliceList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaim
// ResourceClaim describes a request for access to resources in the cluster,
// for use by workloads. For example, if a workload needs an accelerator device
@@ -330,19 +340,10 @@ type ResourceClaimSpec struct {
// +optional
Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"`
- // Controller is the name of the DRA driver that is meant
- // to handle allocation of this claim. If empty, allocation is handled
- // by the scheduler while scheduling a pod.
- //
- // Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"`
+ // Controller is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"`
}
// DeviceClaim defines how to request devices with a ResourceClaim.
@@ -368,6 +369,12 @@ type DeviceClaim struct {
// +optional
// +listType=atomic
Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
+
+ // Potential future extension, ignored by older schedulers. This is
+ // fine because scoring allows users to define a preference, without
+ // making it a hard requirement.
+ //
+ // Score *SomeScoringStruct
}
const (
@@ -451,9 +458,13 @@ type DeviceRequest struct {
// all ordinary claims to the device with respect to access modes and
// any resource allocations.
//
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
// +optional
- // +default=false
- AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
+ // +featureGate=DRAAdminAccess
+ AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
}
const (
@@ -526,10 +537,42 @@ type CELDeviceSelector struct {
//
// cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
//
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
+ //
// +required
Expression string `json:"expression" protobuf:"bytes,1,name=expression"`
}
+// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector
+// evaluation.
+//
+// There is no overall budget for selecting a device, so the actual time
+// required for that is proportional to the number of CEL selectors and how
+// often they need to be evaluated, which can vary depending on several factors
+// (number of devices, cluster utilization, additional constraints).
+//
+// Validation against this limit and [CELSelectorExpressionMaxLength] happens
+// only when setting an expression for the first time or when changing it. If
+// the limits are changed in a future Kubernetes release, existing users are
+// guaranteed that existing expressions will continue to be valid.
+//
+// However, the kube-scheduler also applies this cost limit at runtime, so it
+// could happen that a valid expression fails at runtime after an up- or
+// downgrade. This can also happen without version skew when the cost estimate
+// underestimated the actual cost. That this might happen is the reason why
+// kube-scheduler enforces the runtime limit instead of relying on validation.
+//
+// According to
+// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22,
+// this gives roughly 0.1 second for each expression evaluation.
+// However, this depends on how fast the machine is.
+const CELSelectorExpressionMaxCost = 1000000
+
+// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string.
+const CELSelectorExpressionMaxLength = 10 * 1024
+
// DeviceConstraint must have exactly one field set besides Requests.
type DeviceConstraint struct {
// Requests is a list of the one or more requests in this claim which
@@ -558,6 +601,16 @@ type DeviceConstraint struct {
// +optional
// +oneOf=ConstraintType
MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
+
+ // Potential future extension, not part of the current design:
+ // A CEL expression which compares different devices and returns
+ // true if they match.
+ //
+ // Because it would be part of a one-of, old schedulers will not
+ // accidentally ignore this additional, for them unknown match
+ // criteria.
+ //
+ // MatchExpression string
}
// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
@@ -603,10 +656,16 @@ type OpaqueDeviceConfiguration struct {
// includes self-identification and a version ("kind" + "apiVersion" for
// Kubernetes types), with conversion between different versions.
//
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
// +required
Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"`
}
+// OpaqueParametersMaxLength is the maximum length of the raw data in an
+// [OpaqueDeviceConfiguration.Parameters] field.
+const OpaqueParametersMaxLength = 10 * 1024
+
// ResourceClaimStatus tracks whether the resource has been allocated and what
// the result of that was.
type ResourceClaimStatus struct {
@@ -632,7 +691,7 @@ type ResourceClaimStatus struct {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
- // There can be at most 32 such reservations. This may get increased in
+ // There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional
@@ -642,24 +701,27 @@ type ResourceClaimStatus struct {
// +patchMergeKey=uid
ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
- // Indicates that a claim is to be deallocated. While this is set,
- // no new consumers may be added to ReservedFor.
- //
- // This is only used if the claim needs to be deallocated by a DRA driver.
- // That driver then must deallocate this claim and reset the field
- // together with clearing the Allocation field.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
+ // DeallocationRequested is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"`
+
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
//
// +optional
- // +featureGate=DRAControlPlaneController
- DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"`
+ // +listType=map
+ // +listMapKey=driver
+ // +listMapKey=device
+ // +listMapKey=pool
+ // +featureGate=DRAResourceClaimDeviceStatus
+ Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
-// ReservedForMaxSize is the maximum number of entries in
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
// claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+const ResourceClaimReservedForMaxSize = 256
// ResourceClaimConsumerReference contains enough information to let you
// locate the consumer of a ResourceClaim. The user must be a resource in the same
@@ -694,21 +756,10 @@ type AllocationResult struct {
// +optional
NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"`
- // Controller is the name of the DRA driver which handled the
- // allocation. That driver is also responsible for deallocating the
- // claim. It is empty when the claim can be deallocated without
- // involving a driver.
- //
- // A driver may allocate devices provided by other drivers, so this
- // driver name here can be different from the driver names listed for
- // the results.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"`
+ // Controller is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"`
}
// DeviceAllocationResult is the result of allocating devices.
@@ -769,6 +820,18 @@ type DeviceRequestAllocationResult struct {
//
// +required
Device string `json:"device" protobuf:"bytes,4,name=device"`
+
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"`
}
// DeviceAllocationConfiguration gets embedded in an AllocationResult.
@@ -799,7 +862,8 @@ const (
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimList
// ResourceClaimList is a collection of claims.
type ResourceClaimList struct {
@@ -812,111 +876,11 @@ type ResourceClaimList struct {
Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
-
-// PodSchedulingContext objects hold information that is needed to schedule
-// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
-// mode.
-//
-// This is an alpha type and requires enabling the DRAControlPlaneController
-// feature gate.
-type PodSchedulingContext struct {
- metav1.TypeMeta `json:",inline"`
- // Standard object metadata
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // Spec describes where resources for the Pod are needed.
- Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"`
-
- // Status describes where resources for the Pod can be allocated.
- //
- // +optional
- Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// PodSchedulingContextSpec describes where resources for the Pod are needed.
-type PodSchedulingContextSpec struct {
- // SelectedNode is the node for which allocation of ResourceClaims that
- // are referenced by the Pod and that use "WaitForFirstConsumer"
- // allocation is to be attempted.
- //
- // +optional
- SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"`
-
- // PotentialNodes lists nodes where the Pod might be able to run.
- //
- // The size of this field is limited to 128. This is large enough for
- // many clusters. Larger clusters may need more attempts to find a node
- // that suits all pending resources. This may get increased in the
- // future, but not reduced.
- //
- // +optional
- // +listType=atomic
- PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
-}
-
-// PodSchedulingContextStatus describes where resources for the Pod can be allocated.
-type PodSchedulingContextStatus struct {
- // ResourceClaims describes resource availability for each
- // pod.spec.resourceClaim entry where the corresponding ResourceClaim
- // uses "WaitForFirstConsumer" allocation mode.
- //
- // +listType=map
- // +listMapKey=name
- // +optional
- ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"`
-
- // If there ever is a need to support other kinds of resources
- // than ResourceClaim, then new fields could get added here
- // for those other resources.
-}
-
-// ResourceClaimSchedulingStatus contains information about one particular
-// ResourceClaim with "WaitForFirstConsumer" allocation mode.
-type ResourceClaimSchedulingStatus struct {
- // Name matches the pod.spec.resourceClaims[*].Name field.
- //
- // +required
- Name string `json:"name" protobuf:"bytes,1,name=name"`
-
- // UnsuitableNodes lists nodes that the ResourceClaim cannot be
- // allocated for.
- //
- // The size of this field is limited to 128, the same as for
- // PodSchedulingSpec.PotentialNodes. This may get increased in the
- // future, but not reduced.
- //
- // +optional
- // +listType=atomic
- UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"`
-}
-
-// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
-// node lists that are stored in PodSchedulingContext objects. This limit is part
-// of the API.
-const PodSchedulingNodeListMaxSize = 128
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
-
-// PodSchedulingContextList is a collection of Pod scheduling objects.
-type PodSchedulingContextList struct {
- metav1.TypeMeta `json:",inline"`
- // Standard list metadata
- // +optional
- metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // Items is the list of PodSchedulingContext objects.
- Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClass
// DeviceClass is a vendor- or admin-provided resource that contains
// device configuration and selectors. It can be referenced in
@@ -961,21 +925,10 @@ type DeviceClassSpec struct {
// +listType=atomic
Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
- // Only nodes matching the selector will be considered by the scheduler
- // when trying to find a Node that fits a Pod when that Pod uses
- // a claim that has not been allocated yet *and* that claim
- // gets allocated through a control plane controller. It is ignored
- // when the claim does not use a control plane controller
- // for allocation.
- //
- // Setting this field is optional. If unset, all Nodes are candidates.
- //
- // This is an alpha field and requires enabling the DRAControlPlaneController
- // feature gate.
- //
- // +optional
- // +featureGate=DRAControlPlaneController
- SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"`
+ // SuitableNodes is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"`
}
// DeviceClassConfiguration is used in DeviceClass.
@@ -984,7 +937,8 @@ type DeviceClassConfiguration struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClassList
// DeviceClassList is a collection of classes.
type DeviceClassList struct {
@@ -999,7 +953,8 @@ type DeviceClassList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplate
// ResourceClaimTemplate is used to produce ResourceClaim objects.
//
@@ -1021,7 +976,7 @@ type ResourceClaimTemplate struct {
// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpec struct {
- // ObjectMeta may contain labels and annotations that will be copied into the PVC
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
// when creating it. No other fields are allowed and will be rejected during
// validation.
// +optional
@@ -1034,7 +989,8 @@ type ResourceClaimTemplateSpec struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.26
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplateList
// ResourceClaimTemplateList is a collection of claim templates.
type ResourceClaimTemplateList struct {
@@ -1046,3 +1002,84 @@ type ResourceClaimTemplateList struct {
// Items is the list of resource claim templates.
Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+type AllocatedDeviceStatus struct {
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"`
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ Device string `json:"device" protobuf:"bytes,3,rep,name=device"`
+
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"`
+
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +optional
+ Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"`
+
+ // NetworkData contains network-related information specific to the device.
+ //
+ // +optional
+ NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"`
+}
+
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
+type NetworkDeviceData struct {
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ //
+ // +optional
+ InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"`
+
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ //
+ // +optional
+ // +listType=atomic
+ IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"`
+
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ //
+ // +optional
+ HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"`
+}
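
The AllocatedDeviceStatus and NetworkDeviceData types added above are plain Go structs, so a DRA driver or a test can populate the new ResourceClaimStatus.Devices list directly. A hypothetical usage sketch (not part of this patch) using only fields shown in this diff; the driver name, pool, device, and payload values are made up:

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	status := resourcev1alpha3.ResourceClaimStatus{
		Devices: []resourcev1alpha3.AllocatedDeviceStatus{{
			Driver: "dra.example.com", // hypothetical DRA driver name
			Pool:   "pool-a",          // hypothetical pool
			Device: "gpu-0",           // hypothetical device instance
			// Data is driver-specific and must stay at or under 10 Ki of raw bytes.
			Data: runtime.RawExtension{Raw: []byte(`{"firmware":"1.2.3"}`)},
			NetworkData: &resourcev1alpha3.NetworkDeviceData{
				InterfaceName:   "eth1",
				IPs:             []string{"192.0.2.5/24"},
				HardwareAddress: "00:11:22:33:44:55",
			},
		}},
	}
	fmt.Printf("%+v\n", status.Devices[0])
}
```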
diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
index 1a44a971d..b41609d11 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
@@ -27,11 +27,24 @@ package v1alpha3
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AllocatedDeviceStatus = map[string]string{
+ "": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+ "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+ "conditions": "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.",
+ "data": "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
+ "networkData": "NetworkData contains network-related information specific to the device.",
+}
+
+func (AllocatedDeviceStatus) SwaggerDoc() map[string]string {
+ return map_AllocatedDeviceStatus
+}
+
var map_AllocationResult = map[string]string{
"": "AllocationResult contains attributes of an allocated resource.",
"devices": "Devices is the result of allocating devices.",
"nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.",
- "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
}
func (AllocationResult) SwaggerDoc() map[string]string {
@@ -50,7 +63,7 @@ func (BasicDevice) SwaggerDoc() map[string]string {
var map_CELDeviceSelector = map[string]string{
"": "CELDeviceSelector contains a CEL expression for selecting a device.",
- "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)",
+ "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.",
}
func (CELDeviceSelector) SwaggerDoc() map[string]string {
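For orientation, a minimal sketch (not part of the vendored diff) of how a client could populate such a selector, assuming the v1alpha3 package from this vendor tree; the CEL text simply reuses the documented cel.bind() example:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Check the driver name first, then use cel.bind() to shorten
	// repeated attribute lookups, mirroring the documented example.
	selector := resourceapi.DeviceSelector{
		CEL: &resourceapi.CELDeviceSelector{
			Expression: `device.driver == "dra.example.com" && ` +
				`cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)`,
		},
	}
	fmt.Println(selector.CEL.Expression)
}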
@@ -148,10 +161,9 @@ func (DeviceClassList) SwaggerDoc() map[string]string {
}
var map_DeviceClassSpec = map[string]string{
- "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
- "selectors": "Each selector must be satisfied by a device which is claimed via this class.",
- "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
- "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
+ "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
+ "selectors": "Each selector must be satisfied by a device which is claimed via this class.",
+ "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
}
func (DeviceClassSpec) SwaggerDoc() map[string]string {
@@ -184,7 +196,7 @@ var map_DeviceRequest = map[string]string{
"selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.",
"allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.",
"count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.",
- "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
+ "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
}
func (DeviceRequest) SwaggerDoc() map[string]string {
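A hedged sketch of a request that opts into the new AdminAccess field, assuming the v1alpha3 types in this tree and the ptr.To helper from k8s.io/utils/ptr:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/utils/ptr"
)

func main() {
	// A monitoring-style request for one device with administrative access;
	// the field only takes effect when the DRAAdminAccess gate is enabled.
	req := resourceapi.DeviceRequest{
		Name:            "monitor",
		DeviceClassName: "gpu.example.com",
		AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
		Count:           1,
		AdminAccess:     ptr.To(true),
	}
	fmt.Println(*req.AdminAccess)
}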
@@ -192,11 +204,12 @@ func (DeviceRequest) SwaggerDoc() map[string]string {
}
var map_DeviceRequestAllocationResult = map[string]string{
- "": "DeviceRequestAllocationResult contains the allocation result for one request.",
- "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
- "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
- "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+ "": "DeviceRequestAllocationResult contains the allocation result for one request.",
+ "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+ "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+ "adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
}
func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string {
@@ -212,56 +225,27 @@ func (DeviceSelector) SwaggerDoc() map[string]string {
return map_DeviceSelector
}
+var map_NetworkDeviceData = map[string]string{
+ "": "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.",
+ "interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.",
+ "ips": "IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.",
+ "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.",
+}
+
+func (NetworkDeviceData) SwaggerDoc() map[string]string {
+ return map_NetworkDeviceData
+}
+
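To illustrate the new status fields, a minimal sketch (field values invented) of a driver-reported AllocatedDeviceStatus carrying NetworkDeviceData, assuming the v1alpha3 types added in this diff:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Network details a DRA driver could attach to an allocated device.
	status := resourceapi.AllocatedDeviceStatus{
		Driver: "dra.example.com",
		Pool:   "pool-a",
		Device: "dev-0",
		NetworkData: &resourceapi.NetworkDeviceData{
			InterfaceName:   "eth1",
			IPs:             []string{"192.0.2.5/24", "2001:db8::5/64"},
			HardwareAddress: "00:11:22:33:44:55",
		},
	}
	fmt.Println(status.NetworkData.InterfaceName)
}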
var map_OpaqueDeviceConfiguration = map[string]string{
"": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
"driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
- "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.",
+ "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
}
func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string {
return map_OpaqueDeviceConfiguration
}
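A small sketch (payload invented) of an OpaqueDeviceConfiguration whose raw parameters respect the new 10 Ki limit, assuming the v1alpha3 types and runtime.RawExtension:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Vendor-specific parameters are passed through opaquely to the driver;
	// after this change the raw bytes must not exceed 10 Ki.
	cfg := resourceapi.OpaqueDeviceConfiguration{
		Driver: "dra.example.com",
		Parameters: runtime.RawExtension{
			Raw: []byte(`{"kind":"GpuConfig","apiVersion":"dra.example.com/v1","sharing":"timeSlice"}`),
		},
	}
	fmt.Println(len(cfg.Parameters.Raw) <= 10*1024)
}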
-var map_PodSchedulingContext = map[string]string{
- "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.",
- "metadata": "Standard object metadata",
- "spec": "Spec describes where resources for the Pod are needed.",
- "status": "Status describes where resources for the Pod can be allocated.",
-}
-
-func (PodSchedulingContext) SwaggerDoc() map[string]string {
- return map_PodSchedulingContext
-}
-
-var map_PodSchedulingContextList = map[string]string{
- "": "PodSchedulingContextList is a collection of Pod scheduling objects.",
- "metadata": "Standard list metadata",
- "items": "Items is the list of PodSchedulingContext objects.",
-}
-
-func (PodSchedulingContextList) SwaggerDoc() map[string]string {
- return map_PodSchedulingContextList
-}
-
-var map_PodSchedulingContextSpec = map[string]string{
- "": "PodSchedulingContextSpec describes where resources for the Pod are needed.",
- "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.",
- "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.",
-}
-
-func (PodSchedulingContextSpec) SwaggerDoc() map[string]string {
- return map_PodSchedulingContextSpec
-}
-
-var map_PodSchedulingContextStatus = map[string]string{
- "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.",
- "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.",
-}
-
-func (PodSchedulingContextStatus) SwaggerDoc() map[string]string {
- return map_PodSchedulingContextStatus
-}
-
var map_ResourceClaim = map[string]string{
"": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata",
@@ -295,20 +279,9 @@ func (ResourceClaimList) SwaggerDoc() map[string]string {
return map_ResourceClaimList
}
-var map_ResourceClaimSchedulingStatus = map[string]string{
- "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.",
- "name": "Name matches the pod.spec.resourceClaims[*].Name field.",
- "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.",
-}
-
-func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string {
- return map_ResourceClaimSchedulingStatus
-}
-
var map_ResourceClaimSpec = map[string]string{
- "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.",
- "devices": "Devices defines how to request devices.",
- "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
+ "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.",
+ "devices": "Devices defines how to request devices.",
}
func (ResourceClaimSpec) SwaggerDoc() map[string]string {
@@ -316,10 +289,10 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
}
var map_ResourceClaimStatus = map[string]string{
- "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
- "allocation": "Allocation is set once the claim has been allocated successfully.",
- "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
- "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.",
+ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
+ "allocation": "Allocation is set once the claim has been allocated successfully.",
+ "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
+ "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
}
func (ResourceClaimStatus) SwaggerDoc() map[string]string {
@@ -348,7 +321,7 @@ func (ResourceClaimTemplateList) SwaggerDoc() map[string]string {
var map_ResourceClaimTemplateSpec = map[string]string{
"": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.",
- "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
+ "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.",
"spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.",
}
diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
index 58171df1f..07ba47b59 100644
--- a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
@@ -22,18 +22,48 @@ limitations under the License.
package v1alpha3
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Data.DeepCopyInto(&out.Data)
+ if in.NetworkData != nil {
+ in, out := &in.NetworkData, &out.NetworkData
+ *out = new(NetworkDeviceData)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus.
+func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AllocatedDeviceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
*out = *in
in.Devices.DeepCopyInto(&out.Devices)
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
- *out = new(v1.NodeSelector)
+ *out = new(corev1.NodeSelector)
(*in).DeepCopyInto(*out)
}
return
@@ -144,7 +174,9 @@ func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) {
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]DeviceRequestAllocationResult, len(*in))
- copy(*out, *in)
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
}
if in.Config != nil {
in, out := &in.Config, &out.Config
@@ -355,11 +387,6 @@ func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.SuitableNodes != nil {
- in, out := &in.SuitableNodes, &out.SuitableNodes
- *out = new(v1.NodeSelector)
- (*in).DeepCopyInto(*out)
- }
return
}
@@ -430,6 +457,11 @@ func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.AdminAccess != nil {
+ in, out := &in.AdminAccess, &out.AdminAccess
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -446,6 +478,11 @@ func (in *DeviceRequest) DeepCopy() *DeviceRequest {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) {
*out = *in
+ if in.AdminAccess != nil {
+ in, out := &in.AdminAccess, &out.AdminAccess
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -481,123 +518,39 @@ func (in *DeviceSelector) DeepCopy() *DeviceSelector {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) {
- *out = *in
- in.Parameters.DeepCopyInto(&out.Parameters)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration.
-func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration {
- if in == nil {
- return nil
- }
- out := new(OpaqueDeviceConfiguration)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
+func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) {
*out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext.
-func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext {
- if in == nil {
- return nil
- }
- out := new(PodSchedulingContext)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodSchedulingContext) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]PodSchedulingContext, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList.
-func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList {
- if in == nil {
- return nil
- }
- out := new(PodSchedulingContextList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) {
- *out = *in
- if in.PotentialNodes != nil {
- in, out := &in.PotentialNodes, &out.PotentialNodes
+ if in.IPs != nil {
+ in, out := &in.IPs, &out.IPs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec.
-func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData.
+func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData {
if in == nil {
return nil
}
- out := new(PodSchedulingContextSpec)
+ out := new(NetworkDeviceData)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) {
+func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) {
*out = *in
- if in.ResourceClaims != nil {
- in, out := &in.ResourceClaims, &out.ResourceClaims
- *out = make([]ResourceClaimSchedulingStatus, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
+ in.Parameters.DeepCopyInto(&out.Parameters)
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus.
-func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration.
+func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration {
if in == nil {
return nil
}
- out := new(PodSchedulingContextStatus)
+ out := new(OpaqueDeviceConfiguration)
in.DeepCopyInto(out)
return out
}
@@ -679,27 +632,6 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
return nil
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) {
- *out = *in
- if in.UnsuitableNodes != nil {
- in, out := &in.UnsuitableNodes, &out.UnsuitableNodes
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus.
-func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus {
- if in == nil {
- return nil
- }
- out := new(ResourceClaimSchedulingStatus)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
*out = *in
@@ -730,6 +662,13 @@ func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
*out = make([]ResourceClaimConsumerReference, len(*in))
copy(*out, *in)
}
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]AllocatedDeviceStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -903,7 +842,7 @@ func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) {
out.Pool = in.Pool
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
- *out = new(v1.NodeSelector)
+ *out = new(corev1.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.Devices != nil {
diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 000000000..9f57ab670
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,218 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *DeviceClass) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClass"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeviceClass) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *DeviceClassList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClassList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceClaim) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaim"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceClaimList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceClaimTemplate) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplate"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceClaimTemplateList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplateList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceSlice) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSlice"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *ResourceSliceList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSliceList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) {
+ return 1, 37
+}
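A brief usage sketch of the generated lifecycle hooks above (illustrative only; it just calls the new methods on a v1alpha3 type):

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3"
)

func main() {
	// The prerelease-lifecycle helpers report when a v1alpha3 type appeared,
	// when it is deprecated, and which GroupVersionKind replaces it.
	var rc resourceapi.ResourceClaim
	major, minor := rc.APILifecycleDeprecated()
	fmt.Printf("deprecated in %d.%d, replaced by %s\n", major, minor, rc.APILifecycleReplacement())
}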
diff --git a/vendor/k8s.io/api/resource/v1beta1/doc.go b/vendor/k8s.io/api/resource/v1beta1/doc.go
new file mode 100644
index 000000000..88c35c6ca
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:prerelease-lifecycle-gen=true
+// +groupName=resource.k8s.io
+
+// Package v1beta1 is the v1beta1 version of the resource API.
+package v1beta1 // import "k8s.io/api/resource/v1beta1"
diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.pb.go b/vendor/k8s.io/api/resource/v1beta1/generated.pb.go
new file mode 100644
index 000000000..df4e68f30
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/generated.pb.go
@@ -0,0 +1,8655 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/resource/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ v11 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{0}
+}
+func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
+}
+func (m *AllocatedDeviceStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
+
+func (m *AllocationResult) Reset() { *m = AllocationResult{} }
+func (*AllocationResult) ProtoMessage() {}
+func (*AllocationResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{1}
+}
+func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AllocationResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllocationResult.Merge(m, src)
+}
+func (m *AllocationResult) XXX_Size() int {
+ return m.Size()
+}
+func (m *AllocationResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_AllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
+
+func (m *BasicDevice) Reset() { *m = BasicDevice{} }
+func (*BasicDevice) ProtoMessage() {}
+func (*BasicDevice) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{2}
+}
+func (m *BasicDevice) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BasicDevice) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BasicDevice.Merge(m, src)
+}
+func (m *BasicDevice) XXX_Size() int {
+ return m.Size()
+}
+func (m *BasicDevice) XXX_DiscardUnknown() {
+ xxx_messageInfo_BasicDevice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BasicDevice proto.InternalMessageInfo
+
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
+func (*CELDeviceSelector) ProtoMessage() {}
+func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{3}
+}
+func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CELDeviceSelector.Merge(m, src)
+}
+func (m *CELDeviceSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *CELDeviceSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
+
+func (m *Device) Reset() { *m = Device{} }
+func (*Device) ProtoMessage() {}
+func (*Device) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{4}
+}
+func (m *Device) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Device) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Device.Merge(m, src)
+}
+func (m *Device) XXX_Size() int {
+ return m.Size()
+}
+func (m *Device) XXX_DiscardUnknown() {
+ xxx_messageInfo_Device.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device proto.InternalMessageInfo
+
+func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
+func (*DeviceAllocationConfiguration) ProtoMessage() {}
+func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{5}
+}
+func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
+}
+func (m *DeviceAllocationConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
+
+func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
+func (*DeviceAllocationResult) ProtoMessage() {}
+func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{6}
+}
+func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
+}
+func (m *DeviceAllocationResult) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
+
+func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
+func (*DeviceAttribute) ProtoMessage() {}
+func (*DeviceAttribute) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{7}
+}
+func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceAttribute.Merge(m, src)
+}
+func (m *DeviceAttribute) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceAttribute) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
+
+func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
+func (*DeviceCapacity) ProtoMessage() {}
+func (*DeviceCapacity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{8}
+}
+func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceCapacity.Merge(m, src)
+}
+func (m *DeviceCapacity) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceCapacity) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
+
+func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
+func (*DeviceClaim) ProtoMessage() {}
+func (*DeviceClaim) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{9}
+}
+func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClaim) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClaim.Merge(m, src)
+}
+func (m *DeviceClaim) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClaim) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
+
+func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
+func (*DeviceClaimConfiguration) ProtoMessage() {}
+func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{10}
+}
+func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
+}
+func (m *DeviceClaimConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
+
+func (m *DeviceClass) Reset() { *m = DeviceClass{} }
+func (*DeviceClass) ProtoMessage() {}
+func (*DeviceClass) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{11}
+}
+func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClass) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClass.Merge(m, src)
+}
+func (m *DeviceClass) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClass) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClass.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
+
+func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
+func (*DeviceClassConfiguration) ProtoMessage() {}
+func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{12}
+}
+func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
+}
+func (m *DeviceClassConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
+
+func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
+func (*DeviceClassList) ProtoMessage() {}
+func (*DeviceClassList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{13}
+}
+func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClassList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClassList.Merge(m, src)
+}
+func (m *DeviceClassList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClassList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
+
+func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
+func (*DeviceClassSpec) ProtoMessage() {}
+func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{14}
+}
+func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceClassSpec.Merge(m, src)
+}
+func (m *DeviceClassSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceClassSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
+
+func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
+func (*DeviceConfiguration) ProtoMessage() {}
+func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{15}
+}
+func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceConfiguration.Merge(m, src)
+}
+func (m *DeviceConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
+
+func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
+func (*DeviceConstraint) ProtoMessage() {}
+func (*DeviceConstraint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{16}
+}
+func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceConstraint.Merge(m, src)
+}
+func (m *DeviceConstraint) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceConstraint) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+
+func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
+func (*DeviceRequest) ProtoMessage() {}
+func (*DeviceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{17}
+}
+func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceRequest.Merge(m, src)
+}
+func (m *DeviceRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
+
+func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
+func (*DeviceRequestAllocationResult) ProtoMessage() {}
+func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{18}
+}
+func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
+}
+func (m *DeviceRequestAllocationResult) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
+
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
+func (*DeviceSelector) ProtoMessage() {}
+func (*DeviceSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{19}
+}
+func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeviceSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeviceSelector.Merge(m, src)
+}
+func (m *DeviceSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeviceSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+
+func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
+func (*NetworkDeviceData) ProtoMessage() {}
+func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{20}
+}
+func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkDeviceData.Merge(m, src)
+}
+func (m *NetworkDeviceData) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkDeviceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
+
+func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{21}
+}
+func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
+}
+func (m *OpaqueDeviceConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
+
+func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
+func (*ResourceClaim) ProtoMessage() {}
+func (*ResourceClaim) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{22}
+}
+func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaim) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaim.Merge(m, src)
+}
+func (m *ResourceClaim) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaim) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+
+func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
+func (*ResourceClaimConsumerReference) ProtoMessage() {}
+func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{23}
+}
+func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
+}
+func (m *ResourceClaimConsumerReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
+
+func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
+func (*ResourceClaimList) ProtoMessage() {}
+func (*ResourceClaimList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{24}
+}
+func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimList.Merge(m, src)
+}
+func (m *ResourceClaimList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
+
+func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
+func (*ResourceClaimSpec) ProtoMessage() {}
+func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{25}
+}
+func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
+}
+func (m *ResourceClaimSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
+
+func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
+func (*ResourceClaimStatus) ProtoMessage() {}
+func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{26}
+}
+func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
+}
+func (m *ResourceClaimStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
+func (*ResourceClaimTemplate) ProtoMessage() {}
+func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{27}
+}
+func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
+}
+func (m *ResourceClaimTemplate) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
+func (*ResourceClaimTemplateList) ProtoMessage() {}
+func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{28}
+}
+func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
+}
+func (m *ResourceClaimTemplateList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
+func (*ResourceClaimTemplateSpec) ProtoMessage() {}
+func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{29}
+}
+func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
+}
+func (m *ResourceClaimTemplateSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
+
+func (m *ResourcePool) Reset() { *m = ResourcePool{} }
+func (*ResourcePool) ProtoMessage() {}
+func (*ResourcePool) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{30}
+}
+func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourcePool) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourcePool.Merge(m, src)
+}
+func (m *ResourcePool) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourcePool) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourcePool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
+
+func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
+func (*ResourceSlice) ProtoMessage() {}
+func (*ResourceSlice) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{31}
+}
+func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceSlice) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceSlice.Merge(m, src)
+}
+func (m *ResourceSlice) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceSlice) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
+
+func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
+func (*ResourceSliceList) ProtoMessage() {}
+func (*ResourceSliceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{32}
+}
+func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceSliceList.Merge(m, src)
+}
+func (m *ResourceSliceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceSliceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
+
+func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
+func (*ResourceSliceSpec) ProtoMessage() {}
+func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ba331e3ec6484c27, []int{33}
+}
+func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
+}
+func (m *ResourceSliceSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1beta1.AllocatedDeviceStatus")
+ proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1beta1.AllocationResult")
+ proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1beta1.BasicDevice")
+ proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.AttributesEntry")
+ proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.CapacityEntry")
+ proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1beta1.CELDeviceSelector")
+ proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1beta1.Device")
+ proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationConfiguration")
+ proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationResult")
+ proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.DeviceAttribute")
+ proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.DeviceCapacity")
+ proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1beta1.DeviceClaim")
+ proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClaimConfiguration")
+ proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1beta1.DeviceClass")
+ proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClassConfiguration")
+ proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1beta1.DeviceClassList")
+ proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1beta1.DeviceClassSpec")
+ proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceConfiguration")
+ proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1beta1.DeviceConstraint")
+ proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1beta1.DeviceRequest")
+ proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceRequestAllocationResult")
+ proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1beta1.DeviceSelector")
+ proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1beta1.NetworkDeviceData")
+ proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.OpaqueDeviceConfiguration")
+ proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1beta1.ResourceClaim")
+ proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimConsumerReference")
+ proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimList")
+ proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimSpec")
+ proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimStatus")
+ proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplate")
+ proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateList")
+ proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateSpec")
+ proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1beta1.ResourcePool")
+ proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1beta1.ResourceSlice")
+ proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceList")
+ proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceSpec")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/api/resource/v1beta1/generated.proto", fileDescriptor_ba331e3ec6484c27)
+}
+
+var fileDescriptor_ba331e3ec6484c27 = []byte{
+ // 2051 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4b, 0x8f, 0x1b, 0x49,
+ 0x79, 0xda, 0xed, 0x79, 0x7d, 0x9e, 0x57, 0x2a, 0x64, 0x71, 0x26, 0xc2, 0x9e, 0x74, 0x24, 0xf0,
+ 0x66, 0xb3, 0xed, 0x8d, 0x81, 0x28, 0xca, 0x5e, 0x70, 0xcf, 0xcc, 0x06, 0x43, 0x32, 0x99, 0xad,
+ 0x61, 0x43, 0xb4, 0x6c, 0x10, 0x35, 0xed, 0x9a, 0x99, 0x66, 0xec, 0x6e, 0xa7, 0xbb, 0x7a, 0xb2,
+ 0x73, 0x40, 0xa0, 0x3d, 0xaf, 0x10, 0x77, 0xc4, 0x85, 0x03, 0x12, 0x12, 0x42, 0xfc, 0x02, 0x90,
+ 0x40, 0x88, 0x88, 0x03, 0xac, 0xe0, 0xb2, 0xe2, 0x60, 0x88, 0xf7, 0x07, 0x70, 0xcf, 0x09, 0x55,
+ 0x75, 0xf5, 0xd3, 0x6e, 0xd3, 0x83, 0x96, 0x51, 0xf6, 0xe6, 0xfe, 0xde, 0xf5, 0xbd, 0xab, 0x0c,
+ 0xaf, 0x1d, 0xdf, 0xf6, 0x74, 0xcb, 0x69, 0x92, 0x81, 0xd5, 0x74, 0xa9, 0xe7, 0xf8, 0xae, 0x49,
+ 0x9b, 0x27, 0x37, 0xf7, 0x29, 0x23, 0x37, 0x9b, 0x87, 0xd4, 0xa6, 0x2e, 0x61, 0xb4, 0xab, 0x0f,
+ 0x5c, 0x87, 0x39, 0xe8, 0x4a, 0x40, 0xac, 0x93, 0x81, 0xa5, 0x87, 0xc4, 0xba, 0x24, 0x5e, 0x7f,
+ 0xfd, 0xd0, 0x62, 0x47, 0xfe, 0xbe, 0x6e, 0x3a, 0xfd, 0xe6, 0xa1, 0x73, 0xe8, 0x34, 0x05, 0xcf,
+ 0xbe, 0x7f, 0x20, 0xbe, 0xc4, 0x87, 0xf8, 0x15, 0xc8, 0x5a, 0xd7, 0x12, 0x8a, 0x4d, 0xc7, 0xe5,
+ 0x4a, 0xb3, 0xfa, 0xd6, 0xbf, 0x12, 0xd3, 0xf4, 0x89, 0x79, 0x64, 0xd9, 0xd4, 0x3d, 0x6d, 0x0e,
+ 0x8e, 0x0f, 0xd3, 0xd6, 0x9e, 0x85, 0xcb, 0x6b, 0xf6, 0x29, 0x23, 0x93, 0x74, 0x35, 0xf3, 0xb8,
+ 0x5c, 0xdf, 0x66, 0x56, 0x7f, 0x5c, 0xcd, 0xad, 0xff, 0xc6, 0xe0, 0x99, 0x47, 0xb4, 0x4f, 0xb2,
+ 0x7c, 0xda, 0xcf, 0x55, 0xb8, 0xd4, 0xee, 0xf5, 0x1c, 0x93, 0xc3, 0xb6, 0xe8, 0x89, 0x65, 0xd2,
+ 0x3d, 0x46, 0x98, 0xef, 0xa1, 0x2f, 0xc2, 0x5c, 0xd7, 0xb5, 0x4e, 0xa8, 0x5b, 0x55, 0x36, 0x94,
+ 0xc6, 0xa2, 0xb1, 0xf2, 0x6c, 0x58, 0x9f, 0x19, 0x0d, 0xeb, 0x73, 0x5b, 0x02, 0x8a, 0x25, 0x16,
+ 0x6d, 0x40, 0x79, 0xe0, 0x38, 0xbd, 0x6a, 0x49, 0x50, 0x2d, 0x49, 0xaa, 0xf2, 0xae, 0xe3, 0xf4,
+ 0xb0, 0xc0, 0x08, 0x49, 0x42, 0x72, 0x55, 0xcd, 0x48, 0x12, 0x50, 0x2c, 0xb1, 0xc8, 0x04, 0x30,
+ 0x1d, 0xbb, 0x6b, 0x31, 0xcb, 0xb1, 0xbd, 0x6a, 0x79, 0x43, 0x6d, 0x54, 0x5a, 0x4d, 0x3d, 0x8e,
+ 0x72, 0x74, 0x30, 0x7d, 0x70, 0x7c, 0xc8, 0x01, 0x9e, 0xce, 0xfd, 0xa7, 0x9f, 0xdc, 0xd4, 0x37,
+ 0x43, 0x3e, 0x03, 0x49, 0xe1, 0x10, 0x81, 0x3c, 0x9c, 0x10, 0x8b, 0x1e, 0x40, 0xb9, 0x4b, 0x18,
+ 0xa9, 0xce, 0x6e, 0x28, 0x8d, 0x4a, 0xeb, 0xf5, 0x5c, 0xf1, 0xd2, 0x6f, 0x3a, 0x26, 0x4f, 0xb7,
+ 0xdf, 0x67, 0xd4, 0xf6, 0xb8, 0xf0, 0xe8, 0x74, 0x5b, 0x84, 0x11, 0x2c, 0x04, 0x21, 0x02, 0x15,
+ 0x9b, 0xb2, 0xa7, 0x8e, 0x7b, 0xcc, 0x81, 0xd5, 0x39, 0x21, 0x57, 0xd7, 0xa7, 0x24, 0xa7, 0xbe,
+ 0x23, 0xe9, 0xc5, 0xb1, 0x39, 0x97, 0xb1, 0x3a, 0x1a, 0xd6, 0x2b, 0x3b, 0xb1, 0x18, 0x9c, 0x94,
+ 0xa9, 0xfd, 0x59, 0x81, 0x35, 0x19, 0x24, 0xcb, 0xb1, 0x31, 0xf5, 0xfc, 0x1e, 0x43, 0xdf, 0x85,
+ 0xf9, 0xc0, 0x6f, 0x9e, 0x08, 0x50, 0xa5, 0xf5, 0xe5, 0xa9, 0x3a, 0x03, 0x65, 0x59, 0x29, 0xc6,
+ 0xaa, 0x3c, 0xd1, 0x7c, 0x80, 0xf7, 0x70, 0x28, 0x14, 0x3d, 0x84, 0x25, 0xdb, 0xe9, 0xd2, 0x3d,
+ 0xda, 0xa3, 0x26, 0x73, 0x5c, 0x11, 0xbb, 0x4a, 0x6b, 0x23, 0xa9, 0x84, 0x57, 0x0a, 0xf7, 0xfe,
+ 0x4e, 0x82, 0xce, 0x58, 0x1b, 0x0d, 0xeb, 0x4b, 0x49, 0x08, 0x4e, 0xc9, 0xd1, 0xfe, 0xa1, 0x42,
+ 0xc5, 0x20, 0x9e, 0x65, 0x06, 0x1a, 0xd1, 0x0f, 0x00, 0x08, 0x63, 0xae, 0xb5, 0xef, 0x33, 0x71,
+ 0x14, 0x1e, 0xf5, 0xdb, 0x53, 0x8f, 0x92, 0xe0, 0xd6, 0xdb, 0x11, 0xeb, 0xb6, 0xcd, 0xdc, 0x53,
+ 0xe3, 0x5a, 0x18, 0xfe, 0x18, 0xf1, 0xc1, 0x3f, 0xeb, 0xcb, 0x6f, 0xfb, 0xa4, 0x67, 0x1d, 0x58,
+ 0xb4, 0xbb, 0x43, 0xfa, 0x14, 0x27, 0x14, 0x22, 0x1f, 0x16, 0x4c, 0x32, 0x20, 0xa6, 0xc5, 0x4e,
+ 0xab, 0x25, 0xa1, 0xfc, 0x56, 0x61, 0xe5, 0x9b, 0x92, 0x31, 0x50, 0x7d, 0x55, 0xaa, 0x5e, 0x08,
+ 0xc1, 0xe3, 0x8a, 0x23, 0x55, 0xeb, 0xc7, 0xb0, 0x9a, 0x31, 0x1d, 0xad, 0x81, 0x7a, 0x4c, 0x4f,
+ 0x83, 0x6a, 0xc3, 0xfc, 0x27, 0x32, 0x60, 0xf6, 0x84, 0xf4, 0x7c, 0x2a, 0x6a, 0xab, 0xd2, 0xba,
+ 0x51, 0x24, 0xc0, 0xa1, 0x50, 0x1c, 0xb0, 0xde, 0x29, 0xdd, 0x56, 0xd6, 0x8f, 0x60, 0x39, 0x65,
+ 0xea, 0x04, 0x55, 0xed, 0xb4, 0xaa, 0xd7, 0x0a, 0xa8, 0x0a, 0x45, 0x26, 0x34, 0x69, 0x77, 0xe1,
+ 0xc2, 0xe6, 0xf6, 0x3d, 0xd9, 0x47, 0x64, 0xc4, 0x51, 0x0b, 0x80, 0xbe, 0x3f, 0x70, 0xa9, 0xc7,
+ 0x6b, 0x48, 0x76, 0x93, 0xa8, 0x4c, 0xb7, 0x23, 0x0c, 0x4e, 0x50, 0x69, 0x3e, 0xc8, 0xee, 0xc0,
+ 0xfb, 0x8b, 0x4d, 0xfa, 0x54, 0xf2, 0x45, 0x15, 0x28, 0xfc, 0x29, 0x30, 0xa8, 0x03, 0xb3, 0xfb,
+ 0x3c, 0x2a, 0xd2, 0xf6, 0x46, 0xd1, 0xf8, 0x19, 0x8b, 0xa3, 0x61, 0x7d, 0x56, 0x00, 0x70, 0x20,
+ 0x41, 0xfb, 0xb0, 0x04, 0x5f, 0xc8, 0x56, 0xca, 0xa6, 0x63, 0x1f, 0x58, 0x87, 0xbe, 0x2b, 0x3e,
+ 0xd0, 0xd7, 0x60, 0x2e, 0x90, 0x28, 0x0d, 0x6a, 0x84, 0xcd, 0x6c, 0x4f, 0x40, 0x5f, 0x0c, 0xeb,
+ 0xaf, 0x64, 0x59, 0x03, 0x0c, 0x96, 0x7c, 0xa8, 0x01, 0x0b, 0x2e, 0x7d, 0xe2, 0x53, 0x8f, 0x79,
+ 0x22, 0xe3, 0x16, 0x8d, 0x25, 0x9e, 0x35, 0x58, 0xc2, 0x70, 0x84, 0x45, 0x3f, 0x84, 0x8b, 0x41,
+ 0x35, 0xa6, 0x4c, 0x90, 0x95, 0xf8, 0x46, 0x91, 0x10, 0x25, 0xf9, 0x8c, 0x2b, 0xd2, 0xd4, 0x8b,
+ 0x13, 0x90, 0x78, 0x92, 0x26, 0xed, 0x13, 0x05, 0x5e, 0x99, 0xdc, 0x38, 0x10, 0x85, 0x79, 0x57,
+ 0xfc, 0x0a, 0x6b, 0xf6, 0x4e, 0x01, 0x7b, 0xe4, 0x19, 0xf3, 0xbb, 0x50, 0xf0, 0xed, 0xe1, 0x50,
+ 0x36, 0xda, 0x87, 0x39, 0x53, 0x98, 0x24, 0x8b, 0xf3, 0xce, 0x99, 0x9a, 0x5c, 0xfa, 0xfc, 0xd1,
+ 0xdc, 0x09, 0xc0, 0x58, 0x4a, 0xd6, 0x7e, 0xa9, 0xc0, 0x6a, 0xa6, 0x7a, 0x50, 0x0d, 0x54, 0xcb,
+ 0x66, 0x22, 0xa3, 0xd4, 0x20, 0x3e, 0x1d, 0x9b, 0x3d, 0xe4, 0x79, 0x8e, 0x39, 0x02, 0x5d, 0x85,
+ 0xf2, 0x3e, 0x9f, 0x7a, 0x3c, 0x16, 0x0b, 0xc6, 0xf2, 0x68, 0x58, 0x5f, 0x34, 0x1c, 0xa7, 0x17,
+ 0x50, 0x08, 0x14, 0xfa, 0x12, 0xcc, 0x79, 0xcc, 0xb5, 0xec, 0xc3, 0x6a, 0x59, 0x64, 0x8a, 0xe8,
+ 0xf1, 0x7b, 0x02, 0x12, 0x90, 0x49, 0x34, 0xba, 0x0e, 0xf3, 0x27, 0xd4, 0x15, 0xc5, 0x31, 0x2b,
+ 0x28, 0x45, 0x0b, 0x7d, 0x18, 0x80, 0x02, 0xd2, 0x90, 0x40, 0xa3, 0xb0, 0x92, 0xae, 0x3e, 0xb4,
+ 0x17, 0x56, 0xae, 0x32, 0x36, 0x79, 0xc6, 0x06, 0x66, 0xec, 0xb1, 0xb7, 0x7d, 0x62, 0x33, 0x8b,
+ 0x9d, 0x1a, 0xcb, 0xd2, 0x29, 0xb3, 0x81, 0xa2, 0x40, 0x96, 0xf6, 0xab, 0x12, 0x54, 0xa4, 0x9e,
+ 0x1e, 0xb1, 0xfa, 0xe8, 0x51, 0x22, 0x67, 0x83, 0x70, 0x5f, 0x2f, 0x1e, 0x6e, 0x63, 0x2d, 0xec,
+ 0x8c, 0x13, 0x72, 0xbc, 0x0b, 0x15, 0xd3, 0xb1, 0x3d, 0xe6, 0x12, 0xcb, 0x96, 0x05, 0x91, 0x1e,
+ 0xcb, 0x53, 0x72, 0x5b, 0x72, 0x19, 0x17, 0xa5, 0xfc, 0x4a, 0x0c, 0xf3, 0x70, 0x52, 0x2c, 0x7a,
+ 0x1c, 0xa5, 0x91, 0x2a, 0x14, 0x7c, 0xb5, 0x88, 0x02, 0x7e, 0xf2, 0x62, 0x19, 0xf4, 0x47, 0x05,
+ 0xaa, 0x79, 0x4c, 0xa9, 0x7a, 0x57, 0xfe, 0x97, 0x7a, 0x2f, 0x9d, 0x5b, 0xbd, 0xff, 0x4e, 0x49,
+ 0x84, 0xdd, 0xf3, 0xd0, 0xf7, 0x60, 0x81, 0x6f, 0x58, 0x62, 0x61, 0x52, 0xc6, 0xac, 0x98, 0xb2,
+ 0x8f, 0x3d, 0xd8, 0xff, 0x3e, 0x35, 0xd9, 0x7d, 0xca, 0x48, 0xdc, 0xe9, 0x63, 0x18, 0x8e, 0xa4,
+ 0xa2, 0x1d, 0x28, 0x7b, 0x03, 0x6a, 0x9e, 0x61, 0xc2, 0x09, 0xcb, 0xf6, 0x06, 0xd4, 0x8c, 0x67,
+ 0x01, 0xff, 0xc2, 0x42, 0x8e, 0xf6, 0xd3, 0x64, 0x24, 0x3c, 0x2f, 0x1d, 0x89, 0x1c, 0xff, 0x2a,
+ 0xe7, 0xe6, 0xdf, 0xdf, 0x46, 0x9d, 0x46, 0x58, 0x77, 0xcf, 0xf2, 0x18, 0x7a, 0x6f, 0xcc, 0xc7,
+ 0x7a, 0x31, 0x1f, 0x73, 0x6e, 0xe1, 0xe1, 0xa8, 0xbc, 0x42, 0x48, 0xc2, 0xbf, 0xf7, 0x61, 0xd6,
+ 0x62, 0xb4, 0x1f, 0x16, 0x56, 0xa3, 0xa8, 0x83, 0xe3, 0xbe, 0xd0, 0xe1, 0xec, 0x38, 0x90, 0xa2,
+ 0xfd, 0x25, 0x7d, 0x00, 0xee, 0x78, 0xf4, 0x1e, 0x2c, 0x7a, 0x72, 0xd4, 0x87, 0xcd, 0xa1, 0xc8,
+ 0xfa, 0x10, 0x2d, 0x8c, 0x17, 0xa4, 0xa6, 0xc5, 0x10, 0xe2, 0xe1, 0x58, 0x60, 0xa2, 0x72, 0x4b,
+ 0x67, 0xa9, 0xdc, 0x4c, 0xe8, 0x73, 0x2b, 0xf7, 0x09, 0x4c, 0x8a, 0x1e, 0x7a, 0x17, 0xe6, 0x9c,
+ 0x01, 0x79, 0x12, 0x75, 0xd5, 0xe9, 0x3b, 0xe1, 0x03, 0x41, 0x3a, 0x29, 0x45, 0x80, 0xab, 0x0c,
+ 0xd0, 0x58, 0x4a, 0xd4, 0x7e, 0xac, 0xc0, 0x5a, 0xb6, 0x85, 0x9d, 0xa1, 0x49, 0xec, 0xc2, 0x4a,
+ 0x9f, 0x30, 0xf3, 0x28, 0x9a, 0x55, 0xf2, 0xe6, 0xd5, 0x18, 0x0d, 0xeb, 0x2b, 0xf7, 0x53, 0x98,
+ 0x17, 0xc3, 0x3a, 0x7a, 0xcb, 0xef, 0xf5, 0x4e, 0xd3, 0x5b, 0x68, 0x86, 0x5f, 0xfb, 0x40, 0x85,
+ 0xe5, 0x54, 0xc3, 0x2e, 0xb0, 0x73, 0xb5, 0x61, 0xb5, 0x1b, 0xfb, 0x9a, 0x23, 0xa4, 0x19, 0x9f,
+ 0x97, 0xc4, 0xc9, 0x34, 0x11, 0x7c, 0x59, 0xfa, 0x74, 0xde, 0xa8, 0x9f, 0x76, 0xde, 0x3c, 0x84,
+ 0x15, 0x12, 0xed, 0x01, 0xf7, 0x9d, 0x2e, 0x95, 0x53, 0x58, 0x97, 0x5c, 0x2b, 0xed, 0x14, 0xf6,
+ 0xc5, 0xb0, 0xfe, 0xb9, 0xec, 0xf6, 0xc0, 0xe1, 0x38, 0x23, 0x05, 0x5d, 0x83, 0x59, 0xd3, 0xf1,
+ 0x6d, 0x26, 0x46, 0xb5, 0x1a, 0x97, 0xc9, 0x26, 0x07, 0xe2, 0x00, 0x87, 0x6e, 0x42, 0x85, 0x74,
+ 0xfb, 0x96, 0xdd, 0x36, 0x4d, 0xea, 0x79, 0xe2, 0x4e, 0xb8, 0x10, 0xcc, 0xff, 0x76, 0x0c, 0xc6,
+ 0x49, 0x1a, 0xed, 0xdf, 0x4a, 0xb8, 0x79, 0xe6, 0x2c, 0x49, 0xe8, 0x55, 0xbe, 0x71, 0x09, 0x94,
+ 0x8c, 0x4b, 0x62, 0x6b, 0x12, 0x60, 0x1c, 0xe2, 0x13, 0x77, 0xf7, 0x52, 0xa1, 0xbb, 0xbb, 0x5a,
+ 0xe0, 0xee, 0x5e, 0x9e, 0x7a, 0x77, 0xcf, 0x9c, 0x78, 0xb6, 0xc0, 0x89, 0xbf, 0x13, 0xae, 0x32,
+ 0xd1, 0x45, 0xa1, 0x03, 0xaa, 0x49, 0x7b, 0x13, 0xba, 0xe0, 0x78, 0x2e, 0x8c, 0xdd, 0x32, 0x8c,
+ 0xf9, 0xd1, 0xb0, 0xae, 0x6e, 0x6e, 0xdf, 0xc3, 0x5c, 0x86, 0xf6, 0x6b, 0x05, 0x2e, 0x8c, 0x5d,
+ 0xb3, 0xd1, 0x9b, 0xb0, 0x6c, 0xd9, 0x8c, 0xba, 0x07, 0xc4, 0xa4, 0x3b, 0x71, 0x82, 0x5f, 0x92,
+ 0x87, 0x5a, 0xee, 0x24, 0x91, 0x38, 0x4d, 0x8b, 0x2e, 0x83, 0x6a, 0x0d, 0xc2, 0x95, 0x5d, 0x68,
+ 0xeb, 0xec, 0x7a, 0x98, 0xc3, 0x78, 0x35, 0x1c, 0x11, 0xb7, 0xfb, 0x94, 0xb8, 0xb4, 0xdd, 0xed,
+ 0xf2, 0x3b, 0x8c, 0x74, 0x69, 0x54, 0x0d, 0x5f, 0x4f, 0xa3, 0x71, 0x96, 0x5e, 0xfb, 0x85, 0x02,
+ 0x97, 0x73, 0xfb, 0x48, 0xe1, 0xc7, 0x18, 0x02, 0x30, 0x20, 0x2e, 0xe9, 0x53, 0x46, 0x5d, 0x4f,
+ 0x0e, 0xd5, 0x33, 0xbe, 0x71, 0x44, 0xf3, 0x7a, 0x37, 0x12, 0x84, 0x13, 0x42, 0xb5, 0x9f, 0x95,
+ 0x60, 0x19, 0xcb, 0x70, 0x04, 0xcb, 0xe1, 0xff, 0x7f, 0x4b, 0xd8, 0x4d, 0x6d, 0x09, 0xd3, 0x33,
+ 0x23, 0x65, 0x5b, 0xde, 0x9e, 0x80, 0x1e, 0xf1, 0xe5, 0x9c, 0x30, 0xdf, 0x2b, 0x74, 0x9b, 0x4a,
+ 0xcb, 0x14, 0x7c, 0x71, 0x08, 0x82, 0x6f, 0x2c, 0xe5, 0x69, 0x23, 0x05, 0x6a, 0x29, 0x7a, 0xde,
+ 0xe5, 0xfd, 0x3e, 0x75, 0x31, 0x3d, 0xa0, 0x2e, 0xb5, 0x4d, 0x8a, 0x6e, 0xc0, 0x02, 0x19, 0x58,
+ 0x77, 0x5d, 0xc7, 0x1f, 0xc8, 0x78, 0x46, 0x23, 0xbc, 0xbd, 0xdb, 0x11, 0x70, 0x1c, 0x51, 0x70,
+ 0xea, 0xd0, 0x20, 0x99, 0x55, 0x89, 0x7d, 0x3a, 0x80, 0xe3, 0x88, 0x22, 0x6a, 0xdd, 0xe5, 0xdc,
+ 0xd6, 0x6d, 0x80, 0xea, 0x5b, 0x5d, 0x79, 0xd5, 0x78, 0x43, 0x12, 0xa8, 0xef, 0x74, 0xb6, 0x5e,
+ 0x0c, 0xeb, 0x57, 0xf3, 0x9e, 0x11, 0xd9, 0xe9, 0x80, 0x7a, 0xfa, 0x3b, 0x9d, 0x2d, 0xcc, 0x99,
+ 0xb5, 0xdf, 0x2b, 0x70, 0x21, 0x75, 0xc8, 0x73, 0x58, 0x65, 0x1e, 0xa4, 0x57, 0x99, 0xeb, 0xc5,
+ 0x23, 0x96, 0xb3, 0xcc, 0x1c, 0x65, 0xce, 0x20, 0xb6, 0x99, 0xbd, 0xec, 0xb3, 0x5a, 0xa3, 0xe8,
+ 0x55, 0x21, 0xff, 0x2d, 0x4d, 0xfb, 0x53, 0x09, 0x2e, 0x4e, 0xc8, 0x21, 0xf4, 0x18, 0x20, 0x1e,
+ 0x2f, 0x52, 0xdf, 0xf4, 0xbb, 0xcf, 0xd8, 0xd5, 0x79, 0x45, 0x3c, 0x76, 0xc5, 0xd0, 0x84, 0x40,
+ 0xe4, 0x42, 0xc5, 0xa5, 0x1e, 0x75, 0x4f, 0x68, 0xf7, 0x2d, 0xc7, 0x95, 0x7e, 0x7b, 0xb3, 0xb8,
+ 0xdf, 0xc6, 0x32, 0x37, 0xbe, 0x69, 0xe1, 0x58, 0x2e, 0x4e, 0x2a, 0x41, 0x8f, 0x63, 0xff, 0x05,
+ 0x2f, 0xb8, 0xad, 0x22, 0xe7, 0x49, 0xbf, 0x3d, 0x4f, 0xf1, 0xe4, 0xdf, 0x15, 0xb8, 0x94, 0xb2,
+ 0xf1, 0x5b, 0xb4, 0x3f, 0xe8, 0x11, 0x46, 0xcf, 0xa1, 0x0b, 0x3d, 0x4a, 0x75, 0xa1, 0x5b, 0xc5,
+ 0xfd, 0x18, 0xda, 0x98, 0x7b, 0x6b, 0xf9, 0x9b, 0x02, 0x97, 0x27, 0x72, 0x9c, 0x43, 0x59, 0x7d,
+ 0x3b, 0x5d, 0x56, 0xad, 0xb3, 0x1f, 0x2b, 0xa7, 0xbc, 0xfe, 0x9a, 0x77, 0x28, 0x51, 0x67, 0x9f,
+ 0xc1, 0xa1, 0xa1, 0xfd, 0x46, 0x81, 0xa5, 0x90, 0x92, 0xef, 0x48, 0x05, 0xf6, 0xe4, 0x16, 0x80,
+ 0xfc, 0xcb, 0x25, 0xbc, 0xc9, 0xab, 0xb1, 0xd9, 0x77, 0x23, 0x0c, 0x4e, 0x50, 0xa1, 0x6f, 0x00,
+ 0x0a, 0x0d, 0xdc, 0xeb, 0x89, 0x55, 0x80, 0xef, 0x9b, 0xaa, 0xe0, 0x5d, 0x97, 0xbc, 0x08, 0x8f,
+ 0x51, 0xe0, 0x09, 0x5c, 0xda, 0x1f, 0x94, 0x78, 0x5a, 0x0b, 0xf0, 0x4b, 0xea, 0x78, 0x61, 0x5b,
+ 0xae, 0xe3, 0x93, 0xe3, 0x46, 0x50, 0xbe, 0xac, 0xe3, 0x46, 0x18, 0x97, 0x53, 0x0f, 0x1f, 0xaa,
+ 0x99, 0x43, 0x88, 0x3a, 0x28, 0xba, 0xd9, 0x7d, 0x33, 0xf1, 0x37, 0x5b, 0xa5, 0xf5, 0x6a, 0x21,
+ 0x6b, 0x78, 0x8e, 0x4e, 0xdc, 0xea, 0x6f, 0xc0, 0x82, 0xed, 0x74, 0x83, 0x15, 0x38, 0xb3, 0x52,
+ 0xec, 0x48, 0x38, 0x8e, 0x28, 0xc6, 0xfe, 0x09, 0x2a, 0x7f, 0x3a, 0xff, 0x04, 0x89, 0x35, 0xa8,
+ 0xd7, 0xe3, 0x04, 0xe1, 0x85, 0x21, 0x5e, 0x83, 0x24, 0x1c, 0x47, 0x14, 0x68, 0x27, 0x1e, 0x2c,
+ 0x73, 0x22, 0x22, 0xd7, 0x0a, 0x0c, 0xe6, 0xfc, 0x49, 0x62, 0xb4, 0x9f, 0x3d, 0xaf, 0xcd, 0x7c,
+ 0xf4, 0xbc, 0x36, 0xf3, 0xf1, 0xf3, 0xda, 0xcc, 0x8f, 0x46, 0x35, 0xe5, 0xd9, 0xa8, 0xa6, 0x7c,
+ 0x34, 0xaa, 0x29, 0x1f, 0x8f, 0x6a, 0xca, 0xbf, 0x46, 0x35, 0xe5, 0x27, 0x9f, 0xd4, 0x66, 0xde,
+ 0xbd, 0x32, 0xe5, 0x1f, 0xe9, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x26, 0xe2, 0x5c, 0xf8, 0xaf,
+ 0x1e, 0x00, 0x00,
+}
+
+func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NetworkData != nil {
+ {
+ size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ {
+ size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.Device)
+ copy(dAtA[i:], m.Device)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Pool)
+ copy(dAtA[i:], m.Pool)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeSelector != nil {
+ {
+ size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BasicDevice) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Capacity) > 0 {
+ keysForCapacity := make([]string, 0, len(m.Capacity))
+ for k := range m.Capacity {
+ keysForCapacity = append(keysForCapacity, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForCapacity[iNdEx])
+ copy(dAtA[i:], keysForCapacity[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Attributes) > 0 {
+ keysForAttributes := make([]string, 0, len(m.Attributes))
+ for k := range m.Attributes {
+ keysForAttributes = append(keysForAttributes, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAttributes[iNdEx])
+ copy(dAtA[i:], keysForAttributes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Device) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Device) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Basic != nil {
+ {
+ size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Requests) > 0 {
+ for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Requests[iNdEx])
+ copy(dAtA[i:], m.Requests[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Source)
+ copy(dAtA[i:], m.Source)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Config) > 0 {
+ for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Results) > 0 {
+ for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.VersionValue != nil {
+ i -= len(*m.VersionValue)
+ copy(dAtA[i:], *m.VersionValue)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.StringValue != nil {
+ i -= len(*m.StringValue)
+ copy(dAtA[i:], *m.StringValue)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.BoolValue != nil {
+ i--
+ if *m.BoolValue {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.IntValue != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
+ i--
+ dAtA[i] = 0x10
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClaim) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Config) > 0 {
+ for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Constraints) > 0 {
+ for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Requests) > 0 {
+ for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Requests) > 0 {
+ for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Requests[iNdEx])
+ copy(dAtA[i:], m.Requests[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClass) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Config) > 0 {
+ for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Opaque != nil {
+ {
+ size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MatchAttribute != nil {
+ i -= len(*m.MatchAttribute)
+ copy(dAtA[i:], *m.MatchAttribute)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Requests) > 0 {
+ for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Requests[iNdEx])
+ copy(dAtA[i:], m.Requests[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AdminAccess != nil {
+ i--
+ if *m.AdminAccess {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
+ i--
+ dAtA[i] = 0x28
+ i -= len(m.AllocationMode)
+ copy(dAtA[i:], m.AllocationMode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.DeviceClassName)
+ copy(dAtA[i:], m.DeviceClassName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AdminAccess != nil {
+ i--
+ if *m.AdminAccess {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ i -= len(m.Device)
+ copy(dAtA[i:], m.Device)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Pool)
+ copy(dAtA[i:], m.Pool)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Request)
+ copy(dAtA[i:], m.Request)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.CEL != nil {
+ {
+ size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.HardwareAddress)
+ copy(dAtA[i:], m.HardwareAddress)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.IPs) > 0 {
+ for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.IPs[iNdEx])
+ copy(dAtA[i:], m.IPs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.InterfaceName)
+ copy(dAtA[i:], m.InterfaceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.APIGroup)
+ copy(dAtA[i:], m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Devices) > 0 {
+ for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.ReservedFor) > 0 {
+ for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Allocation != nil {
+ {
+ size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Devices) > 0 {
+ for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i--
+ if m.AllNodes {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ if m.NodeSelector != nil {
+ {
+ size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.NodeName)
+ copy(dAtA[i:], m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Driver)
+ copy(dAtA[i:], m.Driver)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AllocatedDeviceStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Data.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NetworkData != nil {
+ l = m.NetworkData.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *AllocationResult) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Devices.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BasicDevice) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Attributes) > 0 {
+ for k, v := range m.Attributes {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Capacity) > 0 {
+ for k, v := range m.Capacity {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *CELDeviceSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Device) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Basic != nil {
+ l = m.Basic.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceAllocationConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Source)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.DeviceConfiguration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeviceAllocationResult) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Results) > 0 {
+ for _, e := range m.Results {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceAttribute) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IntValue != nil {
+ n += 1 + sovGenerated(uint64(*m.IntValue))
+ }
+ if m.BoolValue != nil {
+ n += 2
+ }
+ if m.StringValue != nil {
+ l = len(*m.StringValue)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.VersionValue != nil {
+ l = len(*m.VersionValue)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceCapacity) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Value.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeviceClaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, e := range m.Requests {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Constraints) > 0 {
+ for _, e := range m.Constraints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceClaimConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.DeviceConfiguration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeviceClass) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeviceClassConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.DeviceConfiguration.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeviceClassList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceClassSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Config) > 0 {
+ for _, e := range m.Config {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Opaque != nil {
+ l = m.Opaque.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceConstraint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Requests) > 0 {
+ for _, s := range m.Requests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchAttribute != nil {
+ l = len(*m.MatchAttribute)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeviceRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DeviceClassName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.AllocationMode)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Count))
+ if m.AdminAccess != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *DeviceRequestAllocationResult) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Request)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Pool)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Device)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.AdminAccess != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *DeviceSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CEL != nil {
+ l = m.CEL.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkDeviceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.InterfaceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.IPs) > 0 {
+ for _, s := range m.IPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.HardwareAddress)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *OpaqueDeviceConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Parameters.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaim) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimConsumerReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Devices.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Allocation != nil {
+ l = m.Allocation.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ReservedFor) > 0 {
+ for _, e := range m.ReservedFor {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Devices) > 0 {
+ for _, e := range m.Devices {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimTemplate) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceClaimTemplateList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceClaimTemplateSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourcePool) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Generation))
+ n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
+ return n
+}
+
+func (m *ResourceSlice) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceSliceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceSliceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Driver)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Pool.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if len(m.Devices) > 0 {
+ for _, e := range m.Devices {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AllocatedDeviceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&AllocatedDeviceStatus{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
+ `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AllocationResult) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AllocationResult{`,
+ `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BasicDevice) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAttributes := make([]string, 0, len(this.Attributes))
+ for k := range this.Attributes {
+ keysForAttributes = append(keysForAttributes, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
+ for _, k := range keysForAttributes {
+ mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
+ }
+ mapStringForAttributes += "}"
+ keysForCapacity := make([]string, 0, len(this.Capacity))
+ for k := range this.Capacity {
+ keysForCapacity = append(keysForCapacity, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
+ for _, k := range keysForCapacity {
+ mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
+ }
+ mapStringForCapacity += "}"
+ s := strings.Join([]string{`&BasicDevice{`,
+ `Attributes:` + mapStringForAttributes + `,`,
+ `Capacity:` + mapStringForCapacity + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CELDeviceSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CELDeviceSelector{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Device) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Device{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceAllocationConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceAllocationConfiguration{`,
+ `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+ `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+ `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceAllocationResult) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResults := "[]DeviceRequestAllocationResult{"
+ for _, f := range this.Results {
+ repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResults += "}"
+ repeatedStringForConfig := "[]DeviceAllocationConfiguration{"
+ for _, f := range this.Config {
+ repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConfig += "}"
+ s := strings.Join([]string{`&DeviceAllocationResult{`,
+ `Results:` + repeatedStringForResults + `,`,
+ `Config:` + repeatedStringForConfig + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceAttribute) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceAttribute{`,
+ `IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
+ `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
+ `StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
+ `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceCapacity) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceCapacity{`,
+ `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClaim) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRequests := "[]DeviceRequest{"
+ for _, f := range this.Requests {
+ repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRequests += "}"
+ repeatedStringForConstraints := "[]DeviceConstraint{"
+ for _, f := range this.Constraints {
+ repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConstraints += "}"
+ repeatedStringForConfig := "[]DeviceClaimConfiguration{"
+ for _, f := range this.Config {
+ repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConfig += "}"
+ s := strings.Join([]string{`&DeviceClaim{`,
+ `Requests:` + repeatedStringForRequests + `,`,
+ `Constraints:` + repeatedStringForConstraints + `,`,
+ `Config:` + repeatedStringForConfig + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClaimConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceClaimConfiguration{`,
+ `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+ `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClass) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceClass{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClassConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceClassConfiguration{`,
+ `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClassList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]DeviceClass{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeviceClassList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceClassSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]DeviceSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSelectors += "}"
+ repeatedStringForConfig := "[]DeviceClassConfiguration{"
+ for _, f := range this.Config {
+ repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConfig += "}"
+ s := strings.Join([]string{`&DeviceClassSpec{`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `Config:` + repeatedStringForConfig + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceConfiguration{`,
+ `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceConstraint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceConstraint{`,
+ `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+ `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]DeviceSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSelectors += "}"
+ s := strings.Join([]string{`&DeviceRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
+ `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+ `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceRequestAllocationResult) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
+ `Request:` + fmt.Sprintf("%v", this.Request) + `,`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
+ `Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+ `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeviceSelector{`,
+ `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkDeviceData) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkDeviceData{`,
+ `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`,
+ `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`,
+ `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OpaqueDeviceConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaim) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceClaim{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimConsumerReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
+ `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
+ `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ResourceClaim{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ResourceClaimList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceClaimSpec{`,
+ `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
+ for _, f := range this.ReservedFor {
+ repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForReservedFor += "}"
+ repeatedStringForDevices := "[]AllocatedDeviceStatus{"
+ for _, f := range this.Devices {
+ repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDevices += "}"
+ s := strings.Join([]string{`&ResourceClaimStatus{`,
+ `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
+ `ReservedFor:` + repeatedStringForReservedFor + `,`,
+ `Devices:` + repeatedStringForDevices + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimTemplate) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceClaimTemplate{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimTemplateList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ResourceClaimTemplate{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ResourceClaimTemplateList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceClaimTemplateSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourcePool) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourcePool{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+ `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceSlice) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceSlice{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceSliceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ResourceSlice{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ResourceSliceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceSliceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForDevices := "[]Device{"
+ for _, f := range this.Devices {
+ repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDevices += "}"
+ s := strings.Join([]string{`&ResourceSliceSpec{`,
+ `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+ `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+ `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`,
+ `Devices:` + repeatedStringForDevices + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Pool = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Device = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NetworkData == nil {
+ m.NetworkData = &NetworkDeviceData{}
+ }
+ if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AllocationResult) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeSelector == nil {
+ m.NodeSelector = &v11.NodeSelector{}
+ }
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BasicDevice) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Attributes == nil {
+ m.Attributes = make(map[QualifiedName]DeviceAttribute)
+ }
+ var mapkey QualifiedName
+ mapvalue := &DeviceAttribute{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &DeviceAttribute{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Attributes[QualifiedName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Capacity == nil {
+ m.Capacity = make(map[QualifiedName]DeviceCapacity)
+ }
+ var mapkey QualifiedName
+ mapvalue := &DeviceCapacity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &DeviceCapacity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Capacity[QualifiedName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Device) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Device: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Basic == nil {
+ m.Basic = &BasicDevice{}
+ }
+ if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Results = append(m.Results, DeviceRequestAllocationResult{})
+ if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Config = append(m.Config, DeviceAllocationConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IntValue = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.BoolValue = &b
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.StringValue = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.VersionValue = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceCapacity) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, DeviceRequest{})
+ if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Constraints = append(m.Constraints, DeviceConstraint{})
+ if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Config = append(m.Config, DeviceClaimConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClass) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DeviceClass{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Config = append(m.Config, DeviceClassConfiguration{})
+ if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Opaque == nil {
+ m.Opaque = &OpaqueDeviceConfiguration{}
+ }
+ if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FullyQualifiedName(dAtA[iNdEx:postIndex])
+ m.MatchAttribute = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selectors = append(m.Selectors, DeviceSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AdminAccess = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Request = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Pool = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Device = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.AdminAccess = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CEL == nil {
+ m.CEL = &CELDeviceSelector{}
+ }
+ if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.InterfaceName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HardwareAddress = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIGroup = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ResourceClaim{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Allocation == nil {
+ m.Allocation = &AllocationResult{}
+ }
+ if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
+ if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Devices = append(m.Devices, AllocatedDeviceStatus{})
+ if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ResourceClaimTemplate{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourcePool) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Generation |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
+ }
+ m.ResourceSliceCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ResourceSliceCount |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ResourceSlice{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Driver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NodeName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeSelector == nil {
+ m.NodeSelector = &v11.NodeSelector{}
+ }
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllNodes = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Devices = append(m.Devices, Device{})
+ if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
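The generated Unmarshal methods above all lean on the same base-128 varint decoding
loop for field tags, lengths, and integer fields. The following standalone sketch is
illustrative only and not part of the vendored file; the names decodeVarint and
errOverflow are made up, but the loop body matches the inline pattern used throughout
the generated code.

	package main

	import (
		"errors"
		"fmt"
		"io"
	)

	var errOverflow = errors.New("varint overflows a 64-bit integer")

	// decodeVarint mirrors the inline loops in the generated code: it reads
	// 7 bits per byte, least-significant group first, and stops at the first
	// byte whose high bit is clear.
	func decodeVarint(data []byte) (value uint64, n int, err error) {
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, 0, errOverflow // same condition as ErrIntOverflowGenerated
			}
			if n >= len(data) {
				return 0, 0, io.ErrUnexpectedEOF
			}
			b := data[n]
			n++
			value |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		return value, n, nil
	}

	func main() {
		// 0x96 0x01 is the classic protobuf example encoding of 150.
		v, n, err := decodeVarint([]byte{0x96, 0x01})
		fmt.Println(v, n, err) // prints: 150 2 <nil>
	}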
diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto
new file mode 100644
index 000000000..4ea13e033
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto
@@ -0,0 +1,892 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.resource.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/resource/v1beta1";
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+message AllocatedDeviceStatus {
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 1;
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ optional string pool = 2;
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ optional string device = 3;
+
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
+
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
+
+ // NetworkData contains network-related information specific to the device.
+ //
+ // +optional
+ optional NetworkDeviceData networkData = 6;
+}
+
+// AllocationResult contains attributes of an allocated resource.
+message AllocationResult {
+ // Devices is the result of allocating devices.
+ //
+ // +optional
+ optional DeviceAllocationResult devices = 1;
+
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ //
+ // +optional
+ optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
+}
+
+// BasicDevice defines one device instance.
+message BasicDevice {
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ map<string, DeviceAttribute> attributes = 1;
+
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ map<string, DeviceCapacity> capacity = 2;
+}
+
+// CELDeviceSelector contains a CEL expression for selecting a device.
+message CELDeviceSelector {
+ // Expression is a CEL expression which evaluates a single device. It
+ // must evaluate to true when the device under consideration satisfies
+ // the desired criteria, and false when it does not. Any other result
+ // is an error and causes allocation of devices to abort.
+ //
+ // The expression's input is an object named "device", which carries
+ // the following properties:
+ // - driver (string): the name of the driver which defines this device.
+ // - attributes (map[string]object): the device's attributes, grouped by prefix
+ // (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+ // of the attributes which were prefixed by "dra.example.com").
+ // - capacity (map[string]object): the device's capacities, grouped by prefix.
+ //
+ // Example: Consider a device with driver="dra.example.com", which exposes
+ // two attributes named "model" and "ext.example.com/family" and which
+ // exposes one capacity named "modules". The input to this expression
+ // would have the following fields:
+ //
+ // device.driver
+ // device.attributes["dra.example.com"].model
+ // device.attributes["ext.example.com"].family
+ // device.capacity["dra.example.com"].modules
+ //
+ // The device.driver field can be used to check for a specific driver,
+ // either as a high-level precondition (i.e. you only want to consider
+ // devices from this driver) or as part of a multi-clause expression
+ // that is meant to consider devices from different drivers.
+ //
+ // The value type of each attribute is defined by the device
+ // definition, and users who write these expressions must consult the
+ // documentation for their specific drivers. The value type of each
+ // capacity is Quantity.
+ //
+ // If an unknown prefix is used as a lookup in either device.attributes
+ // or device.capacity, an empty map will be returned. Any reference to
+ // an unknown field will cause an evaluation error and allocation to
+ // abort.
+ //
+ // A robust expression should check for the existence of attributes
+ // before referencing them.
+ //
+ // For ease of use, the cel.bind() function is enabled, and can be used
+ // to simplify expressions that access multiple attributes with the
+ // same domain. For example:
+ //
+ // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+ //
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
+ //
+ // +required
+ optional string expression = 1;
+}
+
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
+message Device {
+ // Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // Basic defines one device instance.
+ //
+ // +optional
+ // +oneOf=deviceType
+ optional BasicDevice basic = 2;
+}
+
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
+message DeviceAllocationConfiguration {
+ // Source records whether the configuration comes from a class (and thus
+ // is not something that a normal user would have been able to set) or
+ // from a claim.
+ //
+ // +required
+ optional string source = 1;
+
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 2;
+
+ optional DeviceConfiguration deviceConfiguration = 3;
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+message DeviceAllocationResult {
+ // Results lists all allocated devices.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceRequestAllocationResult results = 1;
+
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceAllocationConfiguration config = 2;
+}
+
+// DeviceAttribute must have exactly one field set.
+message DeviceAttribute {
+ // IntValue is a number.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional int64 int = 2;
+
+ // BoolValue is a true/false value.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional bool bool = 3;
+
+ // StringValue is a string. Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional string string = 4;
+
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ optional string version = 5;
+}
+
+// DeviceCapacity describes a quantity associated with a device.
+message DeviceCapacity {
+ // Value defines how much of a certain device capacity is available.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// DeviceClaim defines how to request devices with a ResourceClaim.
+message DeviceClaim {
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceRequest requests = 1;
+
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceConstraint constraints = 2;
+
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceClaimConfiguration config = 3;
+}
+
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
+message DeviceClaimConfiguration {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 1;
+
+ optional DeviceConfiguration deviceConfiguration = 2;
+}
+
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message DeviceClass {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ optional DeviceClassSpec spec = 2;
+}
+
+// DeviceClassConfiguration is used in DeviceClass.
+message DeviceClassConfiguration {
+ optional DeviceConfiguration deviceConfiguration = 1;
+}
+
+// DeviceClassList is a collection of classes.
+message DeviceClassList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of resource classes.
+ repeated DeviceClass items = 2;
+}
+
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
+message DeviceClassSpec {
+ // Each selector must be satisfied by a device which is claimed via this class.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceSelector selectors = 1;
+
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+ // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceClassConfiguration config = 2;
+}
+
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
+message DeviceConfiguration {
+ // Opaque provides driver-specific configuration parameters.
+ //
+ // +optional
+ // +oneOf=ConfigurationType
+ optional OpaqueDeviceConfiguration opaque = 1;
+}
+
+// DeviceConstraint must have exactly one field set besides Requests.
+message DeviceConstraint {
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string requests = 1;
+
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ //
+ // +optional
+ // +oneOf=ConstraintType
+ optional string matchAttribute = 2;
+}
+
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// A DeviceClassName is currently required. Clients must check that it is
+ // indeed set. Its absence indicates that something changed in a way that
+// is not supported by the client yet, in which case it must refuse to
+// handle the request.
+message DeviceRequest {
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // Must be a DNS label.
+ //
+ // +required
+ optional string name = 1;
+
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // request.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ //
+ // +required
+ optional string deviceClassName = 2;
+
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ //
+ // +optional
+ // +listType=atomic
+ repeated DeviceSelector selectors = 3;
+
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ //
+ // +optional
+ optional string allocationMode = 4;
+
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ //
+ // +optional
+ // +oneOf=AllocationMode
+ optional int64 count = 5;
+
+ // AdminAccess indicates that this is a claim for administrative access
+ // to the device(s). Claims with AdminAccess are expected to be used for
+ // monitoring or other management services for a device. They ignore
+ // all ordinary claims to the device with respect to access modes and
+ // any resource allocations.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ optional bool adminAccess = 6;
+}
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+message DeviceRequestAllocationResult {
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. Multiple devices may have been allocated
+ // per request.
+ //
+ // +required
+ optional string request = 1;
+
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 2;
+
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ optional string pool = 3;
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ optional string device = 4;
+
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ optional bool adminAccess = 5;
+}
+
+// DeviceSelector must have exactly one field set.
+message DeviceSelector {
+ // CEL contains a CEL expression for selecting a device.
+ //
+ // +optional
+ // +oneOf=SelectorType
+ optional CELDeviceSelector cel = 1;
+}
+
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
+message NetworkDeviceData {
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ //
+ // +optional
+ optional string interfaceName = 1;
+
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ //
+ // +optional
+ // +listType=atomic
+ repeated string ips = 2;
+
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ //
+ // +optional
+ optional string hardwareAddress = 3;
+}
+
+// OpaqueDeviceConfiguration contains configuration parameters for a driver
+// in a format defined by the driver vendor.
+message OpaqueDeviceConfiguration {
+ // Driver is used to determine which kubelet plugin needs
+ // to be passed these configuration parameters.
+ //
+ // An admission policy provided by the driver developer could use this
+ // to decide whether it needs to validate them.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ optional string driver = 1;
+
+ // Parameters can contain arbitrary data. It is the responsibility of
+ // the driver developer to handle validation and versioning. Typically this
+ // includes self-identification and a version ("kind" + "apiVersion" for
+ // Kubernetes types), with conversion between different versions.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +required
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2;
+}
+
+// ResourceClaim describes a request for access to resources in the cluster,
+// for use by workloads. For example, if a workload needs an accelerator device
+// with specific properties, this is how that request is expressed. The status
+// stanza tracks whether this claim has been satisfied and what specific
+// resources have been allocated.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message ResourceClaim {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec describes what is being requested and how to configure it.
+ // The spec is immutable.
+ optional ResourceClaimSpec spec = 2;
+
+ // Status describes whether the claim is ready to use and what has been allocated.
+ // +optional
+ optional ResourceClaimStatus status = 3;
+}
+
+// ResourceClaimConsumerReference contains enough information to let you
+// locate the consumer of a ResourceClaim. The user must be a resource in the same
+// namespace as the ResourceClaim.
+message ResourceClaimConsumerReference {
+ // APIGroup is the group for the resource being referenced. It is
+ // empty for the core API. This matches the group in the APIVersion
+ // that is used when creating the resources.
+ // +optional
+ optional string apiGroup = 1;
+
+ // Resource is the type of resource being referenced, for example "pods".
+ // +required
+ optional string resource = 3;
+
+ // Name is the name of the resource being referenced.
+ // +required
+ optional string name = 4;
+
+ // UID identifies exactly one incarnation of the resource.
+ // +required
+ optional string uid = 5;
+}
+
+// ResourceClaimList is a collection of claims.
+message ResourceClaimList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of resource claims.
+ repeated ResourceClaim items = 2;
+}
+
+// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
+message ResourceClaimSpec {
+ // Devices defines how to request devices.
+ //
+ // +optional
+ optional DeviceClaim devices = 1;
+}
+
+// ResourceClaimStatus tracks whether the resource has been allocated and what
+// the result of that was.
+message ResourceClaimStatus {
+ // Allocation is set once the claim has been allocated successfully.
+ //
+ // +optional
+ optional AllocationResult allocation = 1;
+
+ // ReservedFor indicates which entities are currently allowed to use
+ // the claim. A Pod which references a ResourceClaim which is not
+ // reserved for that Pod will not be started. A claim that is in
+ // use or might be in use because it has been reserved must not get
+ // deallocated.
+ //
+ // In a cluster with multiple scheduler instances, two pods might get
+ // scheduled concurrently by different schedulers. When they reference
+ // the same ResourceClaim which already has reached its maximum number
+ // of consumers, only one pod can be scheduled.
+ //
+ // Both schedulers try to add their pod to the claim.status.reservedFor
+ // field, but only the update that reaches the API server first gets
+ // stored. The other one fails with an error and the scheduler
+ // which issued it knows that it must put the pod back into the queue,
+ // waiting for the ResourceClaim to become usable again.
+ //
+ // There can be at most 256 such reservations. This may get increased in
+ // the future, but not reduced.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=uid
+ // +patchStrategy=merge
+ // +patchMergeKey=uid
+ repeated ResourceClaimConsumerReference reservedFor = 2;
+
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=driver
+ // +listMapKey=device
+ // +listMapKey=pool
+ // +featureGate=DRAResourceClaimDeviceStatus
+ repeated AllocatedDeviceStatus devices = 4;
+}
+
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message ResourceClaimTemplate {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Describes the ResourceClaim that is to be generated.
+ //
+ // This field is immutable. A ResourceClaim will get created by the
+ // control plane for a Pod when needed and then not get updated
+ // anymore.
+ optional ResourceClaimTemplateSpec spec = 2;
+}
+
+// ResourceClaimTemplateList is a collection of claim templates.
+message ResourceClaimTemplateList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of resource claim templates.
+ repeated ResourceClaimTemplate items = 2;
+}
+
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
+message ResourceClaimTemplateSpec {
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+ // when creating it. No other fields are allowed and will be rejected during
+ // validation.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec for the ResourceClaim. The entire content is copied unchanged
+ // into the ResourceClaim that gets created from this template. The
+ // same fields as in a ResourceClaim are also valid here.
+ optional ResourceClaimSpec spec = 2;
+}
+
+// ResourcePool describes the pool that ResourceSlices belong to.
+message ResourcePool {
+ // Name is used to identify the pool. For node-local devices, this
+ // is often the node name, but this is not required.
+ //
+ // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
+ // separated by slashes. This field is immutable.
+ //
+ // +required
+ optional string name = 1;
+
+ // Generation tracks the change in a pool over time. Whenever a driver
+ // changes something about one or more of the resources in a pool, it
+ // must change the generation in all ResourceSlices which are part of
+ // that pool. Consumers of ResourceSlices should only consider
+ // resources from the pool with the highest generation number. The
+ // generation may be reset by drivers, which should be fine for
+ // consumers, assuming that all ResourceSlices in a pool are updated to
+ // match or deleted.
+ //
+ // Combined with ResourceSliceCount, this mechanism enables consumers to
+ // detect pools which are comprised of multiple ResourceSlices and are
+ // in an incomplete state.
+ //
+ // +required
+ optional int64 generation = 2;
+
+ // ResourceSliceCount is the total number of ResourceSlices in the pool at this
+ // generation number. Must be greater than zero.
+ //
+ // Consumers can use this to check whether they have seen all ResourceSlices
+ // belonging to the same pool.
+ //
+ // +required
+ optional int64 resourceSliceCount = 3;
+}
+
+// ResourceSlice represents one or more resources in a pool of similar resources,
+// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
+// ResourceSlices comprise a pool is determined by the driver.
+//
+// At the moment, the only supported resources are devices with attributes and capacities.
+// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
+// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
+//
+// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
+// and updates all ResourceSlices with that new number and new resource definitions. A consumer
+// must only use ResourceSlices with the highest generation number and ignore all others.
+//
+// When allocating all resources in a pool matching certain criteria or when
+// looking for the best solution among several different alternatives, a
+// consumer should check the number of ResourceSlices in a pool (included in
+// each ResourceSlice) to determine whether its view of a pool is complete and
+// if not, should wait until the driver has completed updating the pool.
+//
+// For resources that are not local to a node, the node name is not set. Instead,
+// the driver may use a node selector to specify where the devices are available.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message ResourceSlice {
+ // Standard object metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Contains the information published by the driver.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ optional ResourceSliceSpec spec = 2;
+}
+
+// ResourceSliceList is a collection of ResourceSlices.
+message ResourceSliceList {
+ // Standard list metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ResourceSlices.
+ repeated ResourceSlice items = 2;
+}
+
+// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
+message ResourceSliceSpec {
+ // Driver identifies the DRA driver providing the capacity information.
+ // A field selector can be used to list only ResourceSlice
+ // objects with a certain driver name.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. This field is immutable.
+ //
+ // +required
+ optional string driver = 1;
+
+ // Pool describes the pool that this ResourceSlice belongs to.
+ //
+ // +required
+ optional ResourcePool pool = 2;
+
+ // NodeName identifies the node which provides the resources in this pool.
+ // A field selector can be used to list only ResourceSlice
+ // objects belonging to a certain node.
+ //
+ // This field can be used to limit access from nodes to ResourceSlices with
+ // the same node name. It also indicates to autoscalers that adding
+ // new nodes of the same type as some old node might also make new
+ // resources available.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ // This field is immutable.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ optional string nodeName = 3;
+
+ // NodeSelector defines which nodes have access to the resources in the pool,
+ // when that pool is not limited to a single node.
+ //
+ // Must use exactly one term.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4;
+
+ // AllNodes indicates that all nodes have access to the resources in the pool.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ optional bool allNodes = 5;
+
+ // Devices lists some or all of the devices in this pool.
+ //
+ // Must not have more than 128 entries.
+ //
+ // +optional
+ // +listType=atomic
+ repeated Device devices = 6;
+}
+
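The ResourcePool and ResourceSlice comments above describe a consumer-side protocol: only the ResourceSlices carrying the highest Pool.Generation should be trusted, and Pool.ResourceSliceCount tells the consumer whether it has seen the whole pool at that generation. Below is a minimal sketch of that check against the vendored k8s.io/api/resource/v1beta1 Go types; the helper name and the assumption that all input slices belong to one driver/pool are illustrative, not part of the API.

package example

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
)

// latestPoolView returns the ResourceSlices belonging to the highest pool
// generation seen so far, plus a flag telling whether that generation is
// complete (all ResourceSlices of the pool are present). All input slices
// are assumed to belong to the same driver and pool name.
func latestPoolView(slices []resourcev1beta1.ResourceSlice) ([]resourcev1beta1.ResourceSlice, bool) {
	var latest []resourcev1beta1.ResourceSlice
	generation := int64(-1)
	for _, s := range slices {
		switch {
		case s.Spec.Pool.Generation > generation:
			// A newer generation invalidates everything gathered so far.
			generation = s.Spec.Pool.Generation
			latest = []resourcev1beta1.ResourceSlice{s}
		case s.Spec.Pool.Generation == generation:
			latest = append(latest, s)
		}
	}
	if len(latest) == 0 {
		return nil, false
	}
	// The view is complete once all slices of the current generation are visible.
	complete := int64(len(latest)) == latest[0].Spec.Pool.ResourceSliceCount
	return latest, complete
}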
diff --git a/vendor/k8s.io/api/resource/v1beta1/register.go b/vendor/k8s.io/api/resource/v1beta1/register.go
new file mode 100644
index 000000000..ce0a1d930
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/register.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "resource.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &DeviceClass{},
+ &DeviceClassList{},
+ &ResourceClaim{},
+ &ResourceClaimList{},
+ &ResourceClaimTemplate{},
+ &ResourceClaimTemplateList{},
+ &ResourceSlice{},
+ &ResourceSliceList{},
+ )
+
+ // Add the watch version that applies
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
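register.go is the standard scheme-registration boilerplate used throughout the Kubernetes API packages. The following is a small, hypothetical usage sketch showing how a consumer could make a runtime.Scheme aware of these v1beta1 kinds; the final print is only a sanity check, not something this package requires.

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers DeviceClass, ResourceClaim, ResourceClaimTemplate,
	// ResourceSlice and their list types under resource.k8s.io/v1beta1.
	if err := resourcev1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := resourcev1beta1.SchemeGroupVersion.WithKind("ResourceSlice")
	fmt.Println("ResourceSlice registered:", scheme.Recognizes(gvk))
}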
diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go
new file mode 100644
index 000000000..fbdc35ca8
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/types.go
@@ -0,0 +1,1088 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/validation"
+)
+
+const (
+ // Finalizer is the finalizer that gets set for claims
+ // which were allocated through a builtin controller.
+	// Reserved for use by Kubernetes; DRA driver controllers must
+ // use their own finalizer.
+ Finalizer = "resource.kubernetes.io/delete-protection"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceSlice represents one or more resources in a pool of similar resources,
+// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
+// ResourceSlices comprise a pool is determined by the driver.
+//
+// At the moment, the only supported resources are devices with attributes and capacities.
+// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
+// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
+//
+// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
+// and updates all ResourceSlices with that new number and new resource definitions. A consumer
+// must only use ResourceSlices with the highest generation number and ignore all others.
+//
+// When allocating all resources in a pool matching certain criteria or when
+// looking for the best solution among several different alternatives, a
+// consumer should check the number of ResourceSlices in a pool (included in
+// each ResourceSlice) to determine whether its view of a pool is complete and
+// if not, should wait until the driver has completed updating the pool.
+//
+// For resources that are not local to a node, the node name is not set. Instead,
+// the driver may use a node selector to specify where the devices are available.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+type ResourceSlice struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Contains the information published by the driver.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+const (
+ // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions]
+ // field selector to filter based on [ResourceSliceSpec.NodeName].
+ ResourceSliceSelectorNodeName = "spec.nodeName"
+ // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions]
+ // field selector to filter based on [ResourceSliceSpec.Driver].
+ ResourceSliceSelectorDriver = "spec.driver"
+)
+
+// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
+type ResourceSliceSpec struct {
+ // Driver identifies the DRA driver providing the capacity information.
+ // A field selector can be used to list only ResourceSlice
+ // objects with a certain driver name.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. This field is immutable.
+ //
+ // +required
+ Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
+
+ // Pool describes the pool that this ResourceSlice belongs to.
+ //
+ // +required
+ Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"`
+
+ // NodeName identifies the node which provides the resources in this pool.
+ // A field selector can be used to list only ResourceSlice
+ // objects belonging to a certain node.
+ //
+ // This field can be used to limit access from nodes to ResourceSlices with
+ // the same node name. It also indicates to autoscalers that adding
+ // new nodes of the same type as some old node might also make new
+ // resources available.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ // This field is immutable.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"`
+
+ // NodeSelector defines which nodes have access to the resources in the pool,
+ // when that pool is not limited to a single node.
+ //
+ // Must use exactly one term.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"`
+
+ // AllNodes indicates that all nodes have access to the resources in the pool.
+ //
+ // Exactly one of NodeName, NodeSelector and AllNodes must be set.
+ //
+ // +optional
+ // +oneOf=NodeSelection
+ AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"`
+
+ // Devices lists some or all of the devices in this pool.
+ //
+ // Must not have more than 128 entries.
+ //
+ // +optional
+ // +listType=atomic
+ Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
+}
+
+// DriverNameMaxLength is the maximum valid length of a driver name in the
+// ResourceSliceSpec and other places. It's the same as for CSI driver names.
+const DriverNameMaxLength = 63
+
+// ResourcePool describes the pool that ResourceSlices belong to.
+type ResourcePool struct {
+ // Name is used to identify the pool. For node-local devices, this
+ // is often the node name, but this is not required.
+ //
+ // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
+ // separated by slashes. This field is immutable.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+ // Generation tracks the change in a pool over time. Whenever a driver
+ // changes something about one or more of the resources in a pool, it
+ // must change the generation in all ResourceSlices which are part of
+ // that pool. Consumers of ResourceSlices should only consider
+ // resources from the pool with the highest generation number. The
+ // generation may be reset by drivers, which should be fine for
+ // consumers, assuming that all ResourceSlices in a pool are updated to
+ // match or deleted.
+ //
+ // Combined with ResourceSliceCount, this mechanism enables consumers to
+ // detect pools which are comprised of multiple ResourceSlices and are
+ // in an incomplete state.
+ //
+ // +required
+ Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"`
+
+ // ResourceSliceCount is the total number of ResourceSlices in the pool at this
+ // generation number. Must be greater than zero.
+ //
+ // Consumers can use this to check whether they have seen all ResourceSlices
+ // belonging to the same pool.
+ //
+ // +required
+ ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"`
+}
+
+const ResourceSliceMaxSharedCapacity = 128
+const ResourceSliceMaxDevices = 128
+const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name.
+
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
+type Device struct {
+	// Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+ // Basic defines one device instance.
+ //
+ // +optional
+ // +oneOf=deviceType
+ Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"`
+}
+
+// BasicDevice defines one device instance.
+type BasicDevice struct {
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"`
+
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ //
+ // +optional
+ Capacity map[QualifiedName]DeviceCapacity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"`
+}
+
+// DeviceCapacity describes a quantity associated with a device.
+type DeviceCapacity struct {
+ // Value defines how much of a certain device capacity is available.
+ //
+ // +required
+ Value resource.Quantity `json:"value" protobuf:"bytes,1,rep,name=value"`
+
+ // potential future addition: fields which define how to "consume"
+ // capacity (= share a single device between different consumers).
+}
+
+// Limit for the sum of the number of entries in both attributes and capacity.
+const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
+
+// QualifiedName is the name of a device attribute or capacity.
+//
+// Attributes and capacities are defined either by the owner of the specific
+// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes
+// project). Because they are sometimes compared across devices, a given name
+// is expected to mean the same thing and have the same type on all devices.
+//
+// Names must be either a C identifier (e.g. "theName") or a DNS subdomain
+// followed by a slash ("/") followed by a C identifier
+// (e.g. "dra.example.com/theName"). Names which do not include the
+// domain prefix are assumed to be part of the driver's domain. Attributes
+// or capacities defined by 3rd parties must include the domain prefix.
+//
+// The maximum length for the DNS subdomain is 63 characters (same as
+// for driver names) and the maximum length of the C identifier
+// is 32.
+type QualifiedName string
+
+// FullyQualifiedName is a QualifiedName where the domain is set.
+type FullyQualifiedName string
+
+// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name.
+const DeviceMaxDomainLength = 63
+
+// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`<domain>/<ID>`).
+const DeviceMaxIDLength = 32
+
+// DeviceAttribute must have exactly one field set.
+type DeviceAttribute struct {
+ // The Go field names below have a Value suffix to avoid a conflict between the
+ // field "String" and the corresponding method. That method is required.
+ // The Kubernetes API is defined without that suffix to keep it more natural.
+
+ // IntValue is a number.
+ //
+ // +optional
+ // +oneOf=ValueType
+ IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"`
+
+ // BoolValue is a true/false value.
+ //
+ // +optional
+ // +oneOf=ValueType
+ BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"`
+
+ // StringValue is a string. Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"`
+
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
+ //
+ // +optional
+ // +oneOf=ValueType
+ VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"`
+}
+
+// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value.
+const DeviceAttributeMaxValueLength = 64
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceSliceList is a collection of ResourceSlices.
+type ResourceSliceList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ResourceSlices.
+ Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceClaim describes a request for access to resources in the cluster,
+// for use by workloads. For example, if a workload needs an accelerator device
+// with specific properties, this is how that request is expressed. The status
+// stanza tracks whether this claim has been satisfied and what specific
+// resources have been allocated.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+type ResourceClaim struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec describes what is being requested and how to configure it.
+ // The spec is immutable.
+ Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+
+ // Status describes whether the claim is ready to use and what has been allocated.
+ // +optional
+ Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
+type ResourceClaimSpec struct {
+ // Devices defines how to request devices.
+ //
+ // +optional
+ Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"`
+
+ // Controller is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"`
+}
+
+// DeviceClaim defines how to request devices with a ResourceClaim.
+type DeviceClaim struct {
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ //
+ // +optional
+ // +listType=atomic
+ Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"`
+
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ //
+ // +optional
+ // +listType=atomic
+ Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"`
+
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ //
+ // +optional
+ // +listType=atomic
+ Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
+
+ // Potential future extension, ignored by older schedulers. This is
+ // fine because scoring allows users to define a preference, without
+ // making it a hard requirement.
+ //
+ // Score *SomeScoringStruct
+}
+
+const (
+ DeviceRequestsMaxSize = AllocationResultsMaxSize
+ DeviceConstraintsMaxSize = 32
+ DeviceConfigMaxSize = 32
+)
+
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// A DeviceClassName is currently required. Clients must check that it is
+// indeed set. Its absence indicates that something changed in a way that
+// is not supported by the client yet, in which case it must refuse to
+// handle the request.
+type DeviceRequest struct {
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // Must be a DNS label.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // request.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ //
+ // +required
+ DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"`
+
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ //
+ // +optional
+ // +listType=atomic
+ Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
+
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+	// If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ //
+ // +optional
+ AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"`
+
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ //
+ // +optional
+ // +oneOf=AllocationMode
+ Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"`
+
+ // AdminAccess indicates that this is a claim for administrative access
+ // to the device(s). Claims with AdminAccess are expected to be used for
+ // monitoring or other management services for a device. They ignore
+ // all ordinary claims to the device with respect to access modes and
+ // any resource allocations.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
+}
+
+const (
+ DeviceSelectorsMaxSize = 32
+)
+
+type DeviceAllocationMode string
+
+// Valid [DeviceRequest.AllocationMode] values.
+const (
+ DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount")
+ DeviceAllocationModeAll = DeviceAllocationMode("All")
+)
+
+// DeviceSelector must have exactly one field set.
+type DeviceSelector struct {
+ // CEL contains a CEL expression for selecting a device.
+ //
+ // +optional
+ // +oneOf=SelectorType
+ CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"`
+}
+
+// CELDeviceSelector contains a CEL expression for selecting a device.
+type CELDeviceSelector struct {
+ // Expression is a CEL expression which evaluates a single device. It
+ // must evaluate to true when the device under consideration satisfies
+ // the desired criteria, and false when it does not. Any other result
+ // is an error and causes allocation of devices to abort.
+ //
+ // The expression's input is an object named "device", which carries
+ // the following properties:
+ // - driver (string): the name of the driver which defines this device.
+ // - attributes (map[string]object): the device's attributes, grouped by prefix
+ // (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+	//    of the attributes which were prefixed by "dra.example.com").
+ // - capacity (map[string]object): the device's capacities, grouped by prefix.
+ //
+ // Example: Consider a device with driver="dra.example.com", which exposes
+ // two attributes named "model" and "ext.example.com/family" and which
+ // exposes one capacity named "modules". This input to this expression
+ // would have the following fields:
+ //
+ // device.driver
+ // device.attributes["dra.example.com"].model
+ // device.attributes["ext.example.com"].family
+ // device.capacity["dra.example.com"].modules
+ //
+ // The device.driver field can be used to check for a specific driver,
+ // either as a high-level precondition (i.e. you only want to consider
+ // devices from this driver) or as part of a multi-clause expression
+ // that is meant to consider devices from different drivers.
+ //
+ // The value type of each attribute is defined by the device
+ // definition, and users who write these expressions must consult the
+ // documentation for their specific drivers. The value type of each
+ // capacity is Quantity.
+ //
+ // If an unknown prefix is used as a lookup in either device.attributes
+ // or device.capacity, an empty map will be returned. Any reference to
+ // an unknown field will cause an evaluation error and allocation to
+ // abort.
+ //
+ // A robust expression should check for the existence of attributes
+ // before referencing them.
+ //
+ // For ease of use, the cel.bind() function is enabled, and can be used
+ // to simplify expressions that access multiple attributes with the
+ // same domain. For example:
+ //
+ // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+ //
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
+ //
+ // +required
+ Expression string `json:"expression" protobuf:"bytes,1,name=expression"`
+}
+
+// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector
+// evaluation.
+//
+// There is no overall budget for selecting a device, so the actual time
+// required for that is proportional to the number of CEL selectors and how
+// often they need to be evaluated, which can vary depending on several factors
+// (number of devices, cluster utilization, additional constraints).
+//
+// Validation against this limit and [CELSelectorExpressionMaxLength] happens
+// only when setting an expression for the first time or when changing it. If
+// the limits are changed in a future Kubernetes release, existing users are
+// guaranteed that existing expressions will continue to be valid.
+//
+// However, the kube-scheduler also applies this cost limit at runtime, so it
+// could happen that a valid expression fails at runtime after an up- or
+// downgrade. This can also happen without version skew when the cost estimate
+// underestimated the actual cost. That this might happen is the reason why
+// kube-scheduler enforces the runtime limit instead of relying on validation.
+//
+// According to
+// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22,
+// this gives roughly 0.1 second for each expression evaluation.
+// However, this depends on how fast the machine is.
+const CELSelectorExpressionMaxCost = 1000000
+
+// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string.
+const CELSelectorExpressionMaxLength = 10 * 1024
+
+// DeviceConstraint must have exactly one field set besides Requests.
+type DeviceConstraint struct {
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // +optional
+ // +listType=atomic
+ Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
+
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ //
+ // +optional
+ // +oneOf=ConstraintType
+ MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
+
+ // Potential future extension, not part of the current design:
+ // A CEL expression which compares different devices and returns
+ // true if they match.
+ //
+ // Because it would be part of a one-of, old schedulers will not
+ // accidentally ignore this additional, for them unknown match
+ // criteria.
+ //
+ // MatchExpression string
+}
+
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
+type DeviceClaimConfiguration struct {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // +optional
+ // +listType=atomic
+ Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
+
+ DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"`
+}
+
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
+type DeviceConfiguration struct {
+ // Opaque provides driver-specific configuration parameters.
+ //
+ // +optional
+ // +oneOf=ConfigurationType
+ Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"`
+}
+
+// OpaqueDeviceConfiguration contains configuration parameters for a driver
+// in a format defined by the driver vendor.
+type OpaqueDeviceConfiguration struct {
+ // Driver is used to determine which kubelet plugin needs
+ // to be passed these configuration parameters.
+ //
+ // An admission policy provided by the driver developer could use this
+ // to decide whether it needs to validate them.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
+
+ // Parameters can contain arbitrary data. It is the responsibility of
+ // the driver developer to handle validation and versioning. Typically this
+ // includes self-identification and a version ("kind" + "apiVersion" for
+ // Kubernetes types), with conversion between different versions.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +required
+ Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"`
+}
+
+// OpaqueParametersMaxLength is the maximum length of the raw data in an
+// [OpaqueDeviceConfiguration.Parameters] field.
+const OpaqueParametersMaxLength = 10 * 1024
+
+// ResourceClaimStatus tracks whether the resource has been allocated and what
+// the result of that was.
+type ResourceClaimStatus struct {
+ // Allocation is set once the claim has been allocated successfully.
+ //
+ // +optional
+ Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"`
+
+ // ReservedFor indicates which entities are currently allowed to use
+ // the claim. A Pod which references a ResourceClaim which is not
+ // reserved for that Pod will not be started. A claim that is in
+ // use or might be in use because it has been reserved must not get
+ // deallocated.
+ //
+ // In a cluster with multiple scheduler instances, two pods might get
+ // scheduled concurrently by different schedulers. When they reference
+ // the same ResourceClaim which already has reached its maximum number
+ // of consumers, only one pod can be scheduled.
+ //
+ // Both schedulers try to add their pod to the claim.status.reservedFor
+ // field, but only the update that reaches the API server first gets
+ // stored. The other one fails with an error and the scheduler
+ // which issued it knows that it must put the pod back into the queue,
+ // waiting for the ResourceClaim to become usable again.
+ //
+ // There can be at most 256 such reservations. This may get increased in
+ // the future, but not reduced.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=uid
+ // +patchStrategy=merge
+ // +patchMergeKey=uid
+ ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
+
+ // DeallocationRequested is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"`
+
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=driver
+ // +listMapKey=device
+ // +listMapKey=pool
+ // +featureGate=DRAResourceClaimDeviceStatus
+ Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
+}
+
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
+// claim.status.reservedFor.
+const ResourceClaimReservedForMaxSize = 256
+
+// ResourceClaimConsumerReference contains enough information to let you
+// locate the consumer of a ResourceClaim. The user must be a resource in the same
+// namespace as the ResourceClaim.
+type ResourceClaimConsumerReference struct {
+ // APIGroup is the group for the resource being referenced. It is
+ // empty for the core API. This matches the group in the APIVersion
+ // that is used when creating the resources.
+ // +optional
+ APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
+ // Resource is the type of resource being referenced, for example "pods".
+ // +required
+ Resource string `json:"resource" protobuf:"bytes,3,name=resource"`
+ // Name is the name of resource being referenced.
+ // +required
+ Name string `json:"name" protobuf:"bytes,4,name=name"`
+ // UID identifies exactly one incarnation of the resource.
+ // +required
+ UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"`
+}
+
+// AllocationResult contains attributes of an allocated resource.
+type AllocationResult struct {
+ // Devices is the result of allocating devices.
+ //
+ // +optional
+ Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"`
+
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ //
+ // +optional
+ NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"`
+
+ // Controller is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"`
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+type DeviceAllocationResult struct {
+ // Results lists all allocated devices.
+ //
+ // +optional
+ // +listType=atomic
+ Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
+
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ //
+ // +optional
+ // +listType=atomic
+ Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
+}
+
+// AllocationResultsMaxSize represents the maximum number of
+// entries in allocation.devices.results.
+const AllocationResultsMaxSize = 32
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+type DeviceRequestAllocationResult struct {
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. Multiple devices may have been allocated
+ // per request.
+ //
+ // +required
+ Request string `json:"request" protobuf:"bytes,1,name=request"`
+
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
+
+ // This name together with the driver name and the device name field
+	// identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ Device string `json:"device" protobuf:"bytes,4,name=device"`
+
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ //
+ // +optional
+ // +featureGate=DRAAdminAccess
+ AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"`
+}
+
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
+type DeviceAllocationConfiguration struct {
+ // Source records whether the configuration comes from a class and thus
+ // is not something that a normal user would have been able to set
+ // or from a claim.
+ //
+ // +required
+ Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
+
+ // Requests lists the names of requests where the configuration applies.
+	// If empty, it applies to all requests.
+ //
+ // +optional
+ // +listType=atomic
+ Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
+
+ DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
+}
+
+type AllocationConfigSource string
+
+// Valid [DeviceAllocationConfiguration.Source] values.
+const (
+ AllocationConfigSourceClass = "FromClass"
+ AllocationConfigSourceClaim = "FromClaim"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceClaimList is a collection of claims.
+type ResourceClaimList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of resource claims.
+ Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+type DeviceClass struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
+type DeviceClassSpec struct {
+ // Each selector must be satisfied by a device which is claimed via this class.
+ //
+ // +optional
+ // +listType=atomic
+ Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"`
+
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+	// Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ //
+ // +optional
+ // +listType=atomic
+ Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
+
+ // SuitableNodes is tombstoned since Kubernetes 1.32 where
+ // it got removed. May be reused once decoding v1alpha3 is no longer
+ // supported.
+ // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"`
+}
+
+// DeviceClassConfiguration is used in DeviceClass.
+type DeviceClassConfiguration struct {
+ DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// DeviceClassList is a collection of classes.
+type DeviceClassList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of resource classes.
+ Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+type ResourceClaimTemplate struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Describes the ResourceClaim that is to be generated.
+ //
+ // This field is immutable. A ResourceClaim will get created by the
+ // control plane for a Pod when needed and then not get updated
+ // anymore.
+ Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
+type ResourceClaimTemplateSpec struct {
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+	// when creating it. Other fields are not allowed and will be rejected during
+ // validation.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec for the ResourceClaim. The entire content is copied unchanged
+ // into the ResourceClaim that gets created from this template. The
+ // same fields as in a ResourceClaim are also valid here.
+ Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.32
+
+// ResourceClaimTemplateList is a collection of claim templates.
+type ResourceClaimTemplateList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of resource claim templates.
+ Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+type AllocatedDeviceStatus struct {
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver.
+ //
+ // +required
+ Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
+
+ // This name together with the driver name and the device name field
+	// identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ //
+ // +required
+ Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"`
+
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ //
+ // +required
+ Device string `json:"device" protobuf:"bytes,3,rep,name=device"`
+
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"`
+
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ //
+ // +optional
+ Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"`
+
+ // NetworkData contains network-related information specific to the device.
+ //
+ // +optional
+ NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"`
+}
+
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
+type NetworkDeviceData struct {
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ //
+ // +optional
+ InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"`
+
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ //
+ // +optional
+ // +listType=atomic
+ IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"`
+
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ //
+ // +optional
+ HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"`
+}
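To make the claim-related types above concrete, here is a hedged sketch that builds an in-memory ResourceClaim with a single DeviceRequest. The DeviceClass name, namespace, and driver string are invented for illustration and are not defined anywhere in this patch; the CEL expression only uses the documented device.driver field.

package example

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClaim asks for exactly one device from a hypothetical
// "gpu.example.com" DeviceClass, narrowed down by a CEL selector that
// matches only devices published by the (hypothetical) driver "gpu.example.com".
func exampleClaim() *resourcev1beta1.ResourceClaim {
	return &resourcev1beta1.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-claim", Namespace: "default"},
		Spec: resourcev1beta1.ResourceClaimSpec{
			Devices: resourcev1beta1.DeviceClaim{
				Requests: []resourcev1beta1.DeviceRequest{{
					Name:            "gpu",
					DeviceClassName: "gpu.example.com",
					AllocationMode:  resourcev1beta1.DeviceAllocationModeExactCount,
					Count:           1,
					Selectors: []resourcev1beta1.DeviceSelector{{
						CEL: &resourcev1beta1.CELDeviceSelector{
							Expression: `device.driver == "gpu.example.com"`,
						},
					}},
				}},
			},
		},
	}
}

A ResourceClaimTemplate would embed the same ResourceClaimSpec in ResourceClaimTemplateSpec.Spec; as documented above, the control plane copies that spec unchanged into the claims it generates for Pods.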
diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 000000000..4ecc35d08
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,386 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AllocatedDeviceStatus = map[string]string{
+ "": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+	"pool":        "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+ "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+ "conditions": "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.",
+ "data": "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
+ "networkData": "NetworkData contains network-related information specific to the device.",
+}
+
+func (AllocatedDeviceStatus) SwaggerDoc() map[string]string {
+ return map_AllocatedDeviceStatus
+}
+
+var map_AllocationResult = map[string]string{
+ "": "AllocationResult contains attributes of an allocated resource.",
+ "devices": "Devices is the result of allocating devices.",
+ "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.",
+}
+
+func (AllocationResult) SwaggerDoc() map[string]string {
+ return map_AllocationResult
+}
+
+var map_BasicDevice = map[string]string{
+ "": "BasicDevice defines one device instance.",
+ "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
+ "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
+}
+
+func (BasicDevice) SwaggerDoc() map[string]string {
+ return map_BasicDevice
+}
+
+var map_CELDeviceSelector = map[string]string{
+ "": "CELDeviceSelector contains a CEL expression for selecting a device.",
+ "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.",
+}
+
+func (CELDeviceSelector) SwaggerDoc() map[string]string {
+ return map_CELDeviceSelector
+}
+
+var map_Device = map[string]string{
+ "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.",
+	"name":  "Name is a unique identifier among all devices managed by the driver in the pool. It must be a DNS label.",
+ "basic": "Basic defines one device instance.",
+}
+
+func (Device) SwaggerDoc() map[string]string {
+ return map_Device
+}
+
+var map_DeviceAllocationConfiguration = map[string]string{
+ "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.",
+ "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.",
+ "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.",
+}
+
+func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string {
+ return map_DeviceAllocationConfiguration
+}
+
+var map_DeviceAllocationResult = map[string]string{
+ "": "DeviceAllocationResult is the result of allocating devices.",
+ "results": "Results lists all allocated devices.",
+ "config": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.",
+}
+
+func (DeviceAllocationResult) SwaggerDoc() map[string]string {
+ return map_DeviceAllocationResult
+}
+
+var map_DeviceAttribute = map[string]string{
+ "": "DeviceAttribute must have exactly one field set.",
+ "int": "IntValue is a number.",
+ "bool": "BoolValue is a true/false value.",
+ "string": "StringValue is a string. Must not be longer than 64 characters.",
+ "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.",
+}
+
+func (DeviceAttribute) SwaggerDoc() map[string]string {
+ return map_DeviceAttribute
+}
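// Illustrative sketch, not part of the generated code: DeviceAttribute carries
// exactly one value field, so a small helper like this (name hypothetical) keeps
// that invariant obvious at the call site.
func exampleStringAttribute(v string) DeviceAttribute {
	// Only StringValue is set; IntValue, BoolValue and VersionValue stay nil.
	return DeviceAttribute{StringValue: &v}
}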
+
+var map_DeviceCapacity = map[string]string{
+ "": "DeviceCapacity describes a quantity associated with a device.",
+ "value": "Value defines how much of a certain device capacity is available.",
+}
+
+func (DeviceCapacity) SwaggerDoc() map[string]string {
+ return map_DeviceCapacity
+}
+
+var map_DeviceClaim = map[string]string{
+ "": "DeviceClaim defines how to request devices with a ResourceClaim.",
+ "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.",
+ "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.",
+ "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.",
+}
+
+func (DeviceClaim) SwaggerDoc() map[string]string {
+ return map_DeviceClaim
+}
+
+var map_DeviceClaimConfiguration = map[string]string{
+ "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.",
+ "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.",
+}
+
+func (DeviceClaimConfiguration) SwaggerDoc() map[string]string {
+ return map_DeviceClaimConfiguration
+}
+
+var map_DeviceClass = map[string]string{
+ "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
+ "metadata": "Standard object metadata",
+ "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.",
+}
+
+func (DeviceClass) SwaggerDoc() map[string]string {
+ return map_DeviceClass
+}
+
+var map_DeviceClassConfiguration = map[string]string{
+ "": "DeviceClassConfiguration is used in DeviceClass.",
+}
+
+func (DeviceClassConfiguration) SwaggerDoc() map[string]string {
+ return map_DeviceClassConfiguration
+}
+
+var map_DeviceClassList = map[string]string{
+ "": "DeviceClassList is a collection of classes.",
+ "metadata": "Standard list metadata",
+ "items": "Items is the list of resource classes.",
+}
+
+func (DeviceClassList) SwaggerDoc() map[string]string {
+ return map_DeviceClassList
+}
+
+var map_DeviceClassSpec = map[string]string{
+ "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
+ "selectors": "Each selector must be satisfied by a device which is claimed via this class.",
+ "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
+}
+
+func (DeviceClassSpec) SwaggerDoc() map[string]string {
+ return map_DeviceClassSpec
+}
+
+var map_DeviceConfiguration = map[string]string{
+ "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.",
+ "opaque": "Opaque provides driver-specific configuration parameters.",
+}
+
+func (DeviceConfiguration) SwaggerDoc() map[string]string {
+ return map_DeviceConfiguration
+}
+
+var map_DeviceConstraint = map[string]string{
+ "": "DeviceConstraint must have exactly one field set besides Requests.",
+ "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.",
+ "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.",
+}
+
+func (DeviceConstraint) SwaggerDoc() map[string]string {
+ return map_DeviceConstraint
+}
+
+var map_DeviceRequest = map[string]string{
+ "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.",
+ "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.",
+ "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.",
+ "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.",
+ "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.",
+ "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.",
+ "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
+}
+
+func (DeviceRequest) SwaggerDoc() map[string]string {
+ return map_DeviceRequest
+}
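// Illustrative sketch, not part of the generated code: a DeviceRequest asking for
// exactly one device of a class, following the field documentation above. The
// request name, class name and function name are hypothetical; "ExactCount" is the
// documented default allocation mode.
func exampleDeviceRequest() DeviceRequest {
	return DeviceRequest{
		// Referenced from pod.spec.containers[].resources.claims and from constraints.
		Name:            "gpu",
		DeviceClassName: "gpu.example.com",
		// ExactCount with Count set explicitly; leaving Count unset defaults to one.
		AllocationMode: "ExactCount",
		Count:          1,
	}
}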
+
+var map_DeviceRequestAllocationResult = map[string]string{
+ "": "DeviceRequestAllocationResult contains the allocation result for one request.",
+ "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+ "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+ "adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
+}
+
+func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string {
+ return map_DeviceRequestAllocationResult
+}
+
+var map_DeviceSelector = map[string]string{
+ "": "DeviceSelector must have exactly one field set.",
+ "cel": "CEL contains a CEL expression for selecting a device.",
+}
+
+func (DeviceSelector) SwaggerDoc() map[string]string {
+ return map_DeviceSelector
+}
+
+var map_NetworkDeviceData = map[string]string{
+ "": "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.",
+ "interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.",
+ "ips": "IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.",
+ "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.",
+}
+
+func (NetworkDeviceData) SwaggerDoc() map[string]string {
+ return map_NetworkDeviceData
+}
+
+var map_OpaqueDeviceConfiguration = map[string]string{
+ "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
+ "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
+}
+
+func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string {
+ return map_OpaqueDeviceConfiguration
+}
+
+var map_ResourceClaim = map[string]string{
+ "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
+ "metadata": "Standard object metadata",
+ "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.",
+ "status": "Status describes whether the claim is ready to use and what has been allocated.",
+}
+
+func (ResourceClaim) SwaggerDoc() map[string]string {
+ return map_ResourceClaim
+}
+
+var map_ResourceClaimConsumerReference = map[string]string{
+ "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.",
+ "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
+ "resource": "Resource is the type of resource being referenced, for example \"pods\".",
+ "name": "Name is the name of resource being referenced.",
+ "uid": "UID identifies exactly one incarnation of the resource.",
+}
+
+func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string {
+ return map_ResourceClaimConsumerReference
+}
+
+var map_ResourceClaimList = map[string]string{
+ "": "ResourceClaimList is a collection of claims.",
+ "metadata": "Standard list metadata",
+ "items": "Items is the list of resource claims.",
+}
+
+func (ResourceClaimList) SwaggerDoc() map[string]string {
+ return map_ResourceClaimList
+}
+
+var map_ResourceClaimSpec = map[string]string{
+ "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.",
+ "devices": "Devices defines how to request devices.",
+}
+
+func (ResourceClaimSpec) SwaggerDoc() map[string]string {
+ return map_ResourceClaimSpec
+}
+
+var map_ResourceClaimStatus = map[string]string{
+ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
+ "allocation": "Allocation is set once the claim has been allocated successfully.",
+ "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
+ "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
+}
+
+func (ResourceClaimStatus) SwaggerDoc() map[string]string {
+ return map_ResourceClaimStatus
+}
+
+var map_ResourceClaimTemplate = map[string]string{
+ "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
+ "metadata": "Standard object metadata",
+ "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.",
+}
+
+func (ResourceClaimTemplate) SwaggerDoc() map[string]string {
+ return map_ResourceClaimTemplate
+}
+
+var map_ResourceClaimTemplateList = map[string]string{
+ "": "ResourceClaimTemplateList is a collection of claim templates.",
+ "metadata": "Standard list metadata",
+ "items": "Items is the list of resource claim templates.",
+}
+
+func (ResourceClaimTemplateList) SwaggerDoc() map[string]string {
+ return map_ResourceClaimTemplateList
+}
+
+var map_ResourceClaimTemplateSpec = map[string]string{
+ "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.",
+ "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.",
+ "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.",
+}
+
+func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string {
+ return map_ResourceClaimTemplateSpec
+}
+
+var map_ResourcePool = map[string]string{
+ "": "ResourcePool describes the pool that ResourceSlices belong to.",
+ "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.",
+ "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.",
+ "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.",
+}
+
+func (ResourcePool) SwaggerDoc() map[string]string {
+ return map_ResourcePool
+}
+
+var map_ResourceSlice = map[string]string{
+ "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple , , .\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
+ "metadata": "Standard object metadata",
+ "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.",
+}
+
+func (ResourceSlice) SwaggerDoc() map[string]string {
+ return map_ResourceSlice
+}
+
+var map_ResourceSliceList = map[string]string{
+ "": "ResourceSliceList is a collection of ResourceSlices.",
+ "metadata": "Standard list metadata",
+ "items": "Items is the list of resource ResourceSlices.",
+}
+
+func (ResourceSliceList) SwaggerDoc() map[string]string {
+ return map_ResourceSliceList
+}
+
+var map_ResourceSliceSpec = map[string]string{
+ "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.",
+ "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.",
+ "pool": "Pool describes the pool that this ResourceSlice belongs to.",
+ "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.",
+ "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.",
+ "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.",
+ "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.",
+}
+
+func (ResourceSliceSpec) SwaggerDoc() map[string]string {
+ return map_ResourceSliceSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
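// Illustrative sketch, not part of the generated code: a ResourceSliceSpec roughly as
// a DRA driver might publish it for a node-local pool, following the pool/generation
// semantics documented above. Driver, node, device and attribute names are
// hypothetical.
func exampleResourceSliceSpec() ResourceSliceSpec {
	model := "turbo"
	return ResourceSliceSpec{
		Driver:   "dra.example.com",
		NodeName: "node-1",
		Pool: ResourcePool{
			// Every ResourceSlice of the pool repeats the same name, generation and
			// slice count so consumers can tell when their view of the pool is complete.
			Name:               "node-1",
			Generation:         1,
			ResourceSliceCount: 1,
		},
		Devices: []Device{{
			Name: "gpu-0",
			Basic: &BasicDevice{
				Attributes: map[QualifiedName]DeviceAttribute{
					"model": {StringValue: &model},
				},
			},
		}},
	}
}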
diff --git a/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..3be61333f
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,882 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Data.DeepCopyInto(&out.Data)
+ if in.NetworkData != nil {
+ in, out := &in.NetworkData, &out.NetworkData
+ *out = new(NetworkDeviceData)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus.
+func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AllocatedDeviceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
+ *out = *in
+ in.Devices.DeepCopyInto(&out.Devices)
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(corev1.NodeSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult.
+func (in *AllocationResult) DeepCopy() *AllocationResult {
+ if in == nil {
+ return nil
+ }
+ out := new(AllocationResult)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicDevice) DeepCopyInto(out *BasicDevice) {
+ *out = *in
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make(map[QualifiedName]DeviceAttribute, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Capacity != nil {
+ in, out := &in.Capacity, &out.Capacity
+ *out = make(map[QualifiedName]DeviceCapacity, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice.
+func (in *BasicDevice) DeepCopy() *BasicDevice {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicDevice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector.
+func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(CELDeviceSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Device) DeepCopyInto(out *Device) {
+ *out = *in
+ if in.Basic != nil {
+ in, out := &in.Basic, &out.Basic
+ *out = new(BasicDevice)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
+func (in *Device) DeepCopy() *Device {
+ if in == nil {
+ return nil
+ }
+ out := new(Device)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) {
+ *out = *in
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration.
+func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceAllocationConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) {
+ *out = *in
+ if in.Results != nil {
+ in, out := &in.Results, &out.Results
+ *out = make([]DeviceRequestAllocationResult, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make([]DeviceAllocationConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult.
+func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceAllocationResult)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) {
+ *out = *in
+ if in.IntValue != nil {
+ in, out := &in.IntValue, &out.IntValue
+ *out = new(int64)
+ **out = **in
+ }
+ if in.BoolValue != nil {
+ in, out := &in.BoolValue, &out.BoolValue
+ *out = new(bool)
+ **out = **in
+ }
+ if in.StringValue != nil {
+ in, out := &in.StringValue, &out.StringValue
+ *out = new(string)
+ **out = **in
+ }
+ if in.VersionValue != nil {
+ in, out := &in.VersionValue, &out.VersionValue
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute.
+func (in *DeviceAttribute) DeepCopy() *DeviceAttribute {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceAttribute)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceCapacity) DeepCopyInto(out *DeviceCapacity) {
+ *out = *in
+ out.Value = in.Value.DeepCopy()
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceCapacity.
+func (in *DeviceCapacity) DeepCopy() *DeviceCapacity {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceCapacity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) {
+ *out = *in
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = make([]DeviceRequest, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Constraints != nil {
+ in, out := &in.Constraints, &out.Constraints
+ *out = make([]DeviceConstraint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make([]DeviceClaimConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim.
+func (in *DeviceClaim) DeepCopy() *DeviceClaim {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) {
+ *out = *in
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration.
+func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClaimConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClass) DeepCopyInto(out *DeviceClass) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass.
+func (in *DeviceClass) DeepCopy() *DeviceClass {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClass)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeviceClass) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) {
+ *out = *in
+ in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration.
+func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClassConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DeviceClass, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList.
+func (in *DeviceClassList) DeepCopy() *DeviceClassList {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClassList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeviceClassList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) {
+ *out = *in
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
+ *out = make([]DeviceSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make([]DeviceClassConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec.
+func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceClassSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) {
+ *out = *in
+ if in.Opaque != nil {
+ in, out := &in.Opaque, &out.Opaque
+ *out = new(OpaqueDeviceConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration.
+func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) {
+ *out = *in
+ if in.Requests != nil {
+ in, out := &in.Requests, &out.Requests
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.MatchAttribute != nil {
+ in, out := &in.MatchAttribute, &out.MatchAttribute
+ *out = new(FullyQualifiedName)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint.
+func (in *DeviceConstraint) DeepCopy() *DeviceConstraint {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceConstraint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) {
+ *out = *in
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
+ *out = make([]DeviceSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AdminAccess != nil {
+ in, out := &in.AdminAccess, &out.AdminAccess
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest.
+func (in *DeviceRequest) DeepCopy() *DeviceRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) {
+ *out = *in
+ if in.AdminAccess != nil {
+ in, out := &in.AdminAccess, &out.AdminAccess
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult.
+func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceRequestAllocationResult)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) {
+ *out = *in
+ if in.CEL != nil {
+ in, out := &in.CEL, &out.CEL
+ *out = new(CELDeviceSelector)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector.
+func (in *DeviceSelector) DeepCopy() *DeviceSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(DeviceSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) {
+ *out = *in
+ if in.IPs != nil {
+ in, out := &in.IPs, &out.IPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData.
+func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkDeviceData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) {
+ *out = *in
+ in.Parameters.DeepCopyInto(&out.Parameters)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration.
+func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(OpaqueDeviceConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
+func (in *ResourceClaim) DeepCopy() *ResourceClaim {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceClaim) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference.
+func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimConsumerReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ResourceClaim, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList.
+func (in *ResourceClaimList) DeepCopy() *ResourceClaimList {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) {
+ *out = *in
+ in.Devices.DeepCopyInto(&out.Devices)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec.
+func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) {
+ *out = *in
+ if in.Allocation != nil {
+ in, out := &in.Allocation, &out.Allocation
+ *out = new(AllocationResult)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ReservedFor != nil {
+ in, out := &in.ReservedFor, &out.ReservedFor
+ *out = make([]ResourceClaimConsumerReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]AllocatedDeviceStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus.
+func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate.
+func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ResourceClaimTemplate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList.
+func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimTemplateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) {
+ *out = *in
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec.
+func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceClaimTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourcePool) DeepCopyInto(out *ResourcePool) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool.
+func (in *ResourcePool) DeepCopy() *ResourcePool {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourcePool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
+func (in *ResourceSlice) DeepCopy() *ResourceSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceSlice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceSlice) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ResourceSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
+func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceSliceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) {
+ *out = *in
+ out.Pool = in.Pool
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(corev1.NodeSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]Device, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec.
+func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceSliceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 000000000..b79111b81
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,166 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1beta1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeviceClass) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 32
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) {
+ return 1, 38
+}
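// Illustrative sketch, not part of the vendored patch: the generated
// APILifecycleIntroduced/Deprecated/Removed methods above let a caller compare
// a type's lifecycle against the serving cluster's version. The package name,
// interface, and helpers below are assumptions for illustration only; the
// concrete resource types in this file provide the methods.
package apilifecycle

import "fmt"

// prereleaseLifecycler matches the generated lifecycle methods.
type prereleaseLifecycler interface {
	APILifecycleIntroduced() (major, minor int)
	APILifecycleDeprecated() (major, minor int)
	APILifecycleRemoved() (major, minor int)
}

// atLeast reports whether have >= want when comparing major.minor versions.
func atLeast(haveMajor, haveMinor, wantMajor, wantMinor int) bool {
	return haveMajor > wantMajor || (haveMajor == wantMajor && haveMinor >= wantMinor)
}

// lifecycleWarning returns a note when the cluster has reached the release in
// which the type is deprecated or removed, and "" otherwise.
func lifecycleWarning(obj prereleaseLifecycler, clusterMajor, clusterMinor int) string {
	if major, minor := obj.APILifecycleRemoved(); atLeast(clusterMajor, clusterMinor, major, minor) {
		return fmt.Sprintf("API was removed in %d.%d and is no longer served", major, minor)
	}
	if major, minor := obj.APILifecycleDeprecated(); atLeast(clusterMajor, clusterMinor, major, minor) {
		return fmt.Sprintf("API is deprecated since %d.%d; plan a migration", major, minor)
	}
	return ""
}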
diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto
index ec2beac46..dfc309bb4 100644
--- a/vendor/k8s.io/api/storage/v1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1/generated.proto
@@ -491,8 +491,8 @@ message VolumeAttachmentList {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
message VolumeAttachmentSource {
// persistentVolumeName represents the name of the persistent volume to attach.
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go
index de2bbc2e0..3936dc83b 100644
--- a/vendor/k8s.io/api/storage/v1/types.go
+++ b/vendor/k8s.io/api/storage/v1/types.go
@@ -169,8 +169,8 @@ type VolumeAttachmentSpec struct {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
type VolumeAttachmentSource struct {
// persistentVolumeName represents the name of the persistent volume to attach.
@@ -433,7 +433,7 @@ const (
// ReadWriteOnceWithFSTypeFSGroupPolicy indicates that each volume will be examined
// to determine if the volume ownership and permissions
// should be modified. If a fstype is defined and the volume's access mode
- // contains ReadWriteOnce, then the defined fsGroup will be applied.
+ // contains ReadWriteOnce or ReadWriteOncePod, then the defined fsGroup will be applied.
// This mode should be defined if it's expected that the
// fsGroup may need to be modified depending on the pod's SecurityPolicy.
// This is the default behavior if no other FSGroupPolicy is defined.
diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
index 89b1cbb20..eee18bd18 100644
--- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
@@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}
var map_VolumeAttachmentSource = map[string]string{
- "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.",
"persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.",
}
diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
index 380adbf66..79acbebd8 100644
--- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto
@@ -155,8 +155,8 @@ message VolumeAttachmentList {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
message VolumeAttachmentSource {
// persistentVolumeName represents the name of the persistent volume to attach.
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go
index 1fbf65f81..7ef7353eb 100644
--- a/vendor/k8s.io/api/storage/v1alpha1/types.go
+++ b/vendor/k8s.io/api/storage/v1alpha1/types.go
@@ -84,8 +84,8 @@ type VolumeAttachmentSpec struct {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
type VolumeAttachmentSource struct {
// persistentVolumeName represents the name of the persistent volume to attach.
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
index ac87dbdca..e44f37b2d 100644
--- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
@@ -72,7 +72,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}
var map_VolumeAttachmentSource = map[string]string{
- "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.",
"persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.",
}
diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto
index dfef3f6cc..64dcc8262 100644
--- a/vendor/k8s.io/api/storage/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto
@@ -493,8 +493,8 @@ message VolumeAttachmentList {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
message VolumeAttachmentSource {
// persistentVolumeName represents the name of the persistent volume to attach.
diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go
index ce294e3db..d9b6b7685 100644
--- a/vendor/k8s.io/api/storage/v1beta1/types.go
+++ b/vendor/k8s.io/api/storage/v1beta1/types.go
@@ -176,8 +176,8 @@ type VolumeAttachmentSpec struct {
}
// VolumeAttachmentSource represents a volume that should be attached.
-// Right now only PersistenVolumes can be attached via external attacher,
-// in future we may allow also inline volumes in pods.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
type VolumeAttachmentSource struct {
// persistentVolumeName represents the name of the persistent volume to attach.
diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
index 8c1a66350..58da44fc8 100644
--- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
@@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}
var map_VolumeAttachmentSource = map[string]string{
- "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
+ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.",
"persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.",
}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 57e0e71f6..6a3ab8f24 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{
metav1.StatusReasonGone: {},
metav1.StatusReasonInvalid: {},
metav1.StatusReasonServerTimeout: {},
+ metav1.StatusReasonStoreReadError: {},
metav1.StatusReasonTimeout: {},
metav1.StatusReasonTooManyRequests: {},
metav1.StatusReasonBadRequest: {},
@@ -775,6 +776,12 @@ func IsUnexpectedObjectError(err error) bool {
return err != nil && (ok || errors.As(err, &uoe))
}
+// IsStoreReadError determines if err is due to either failure to transform the
+// data from the storage, or failure to decode the object appropriately.
+func IsStoreReadError(err error) bool {
+ return ReasonForError(err) == metav1.StatusReasonStoreReadError
+}
+
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
// suggested seconds to wait, or false if the error does not imply a wait. It does not
// address whether the error *should* be retried, since some errors (like a 3xx) may
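// Illustrative sketch, not part of the vendored patch: the new
// IsStoreReadError helper distinguishes storage corruption/decode failures
// from ordinary API errors. The client-go Get call and log messages below are
// assumptions for illustration only.
package storereaderrors

import (
	"context"
	"log"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func reportNode(ctx context.Context, client kubernetes.Interface, name string) {
	_, err := client.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
	switch {
	case err == nil:
		return
	case apierrors.IsStoreReadError(err):
		// Stored data could not be transformed or decoded; retrying will not
		// help, so surface it instead of treating it as a transient failure.
		log.Printf("store read error for node %q: %v", name, err)
	case apierrors.IsNotFound(err):
		log.Printf("node %q not found", name)
	default:
		log.Printf("unexpected error getting node %q: %v", name, err)
	}
}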
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
index 1e1330fff..3bd8bf535 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -10,5 +10,6 @@ reviewers:
- mikedanese
- liggitt
- janetkuo
- - ncdc
- dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go
new file mode 100644
index 000000000..72c6438cb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testrestmapper
+
+import (
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
+// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
+// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
+// all other groups alphabetical.
+//
+// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests
+// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked.
+func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper {
+ unionMapper := meta.MultiRESTMapper{}
+ unionedGroups := sets.NewString()
+ for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() {
+ if !unionedGroups.Has(enabledVersion.Group) {
+ unionedGroups.Insert(enabledVersion.Group)
+ unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme))
+ }
+ }
+
+ if len(versionPatterns) != 0 {
+ resourcePriority := []schema.GroupVersionResource{}
+ kindPriority := []schema.GroupVersionKind{}
+ for _, versionPriority := range versionPatterns {
+ resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
+ kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
+ }
+
+ return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
+ }
+
+ prioritizedGroups := []string{"", "extensions", "metrics"}
+ resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...)
+
+ prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
+ remainingGroups := sets.String{}
+ for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() {
+ if !prioritizedGroupsSet.Has(enabledVersion.Group) {
+ remainingGroups.Insert(enabledVersion.Group)
+ }
+ }
+
+ remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...)
+ resourcePriority = append(resourcePriority, remainingResourcePriority...)
+ kindPriority = append(kindPriority, remainingKindPriority...)
+
+ return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
+}
+
+// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first,
+// then any non-preferred version of the group second.
+func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) {
+ resourcePriority := []schema.GroupVersionResource{}
+ kindPriority := []schema.GroupVersionKind{}
+
+ for _, group := range groups {
+ availableVersions := scheme.PrioritizedVersionsForGroup(group)
+ if len(availableVersions) > 0 {
+ resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource))
+ kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind))
+ }
+ }
+ for _, group := range groups {
+ resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource})
+ kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind})
+ }
+
+ return resourcePriority, kindPriority
+}
+
+func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper {
+ mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group))
+ for _, gv := range scheme.PrioritizedVersionsForGroup(group) {
+ for kind := range scheme.KnownTypes(gv) {
+ if ignoredKinds.Has(kind) {
+ continue
+ }
+ scope := meta.RESTScopeNamespace
+ if rootScopedKinds[gv.WithKind(kind).GroupKind()] {
+ scope = meta.RESTScopeRoot
+ }
+ mapper.Add(gv.WithKind(kind), scope)
+ }
+ }
+
+ return mapper
+}
+
+// hardcoded is good enough for the test we're running
+var rootScopedKinds = map[schema.GroupKind]bool{
+ {Group: "admission.k8s.io", Kind: "AdmissionReview"}: true,
+
+ {Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true,
+ {Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}: true,
+
+ {Group: "authentication.k8s.io", Kind: "TokenReview"}: true,
+
+ {Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}: true,
+ {Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true,
+ {Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}: true,
+
+ {Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true,
+
+ {Group: "", Kind: "Node"}: true,
+ {Group: "", Kind: "Namespace"}: true,
+ {Group: "", Kind: "PersistentVolume"}: true,
+ {Group: "", Kind: "ComponentStatus"}: true,
+
+ {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: true,
+ {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true,
+
+ {Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true,
+
+ {Group: "storage.k8s.io", Kind: "StorageClass"}: true,
+ {Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true,
+
+ {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true,
+
+ {Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true,
+
+ {Group: "audit.k8s.io", Kind: "Event"}: true,
+ {Group: "audit.k8s.io", Kind: "Policy"}: true,
+
+ {Group: "apiregistration.k8s.io", Kind: "APIService"}: true,
+
+ {Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true,
+
+ {Group: "wardle.example.com", Kind: "Fischer"}: true,
+}
+
+// hardcoded is good enough for the test we're running
+var ignoredKinds = sets.NewString(
+ "ListOptions",
+ "DeleteOptions",
+ "Status",
+ "PodLogOptions",
+ "PodExecOptions",
+ "PodAttachOptions",
+ "PodPortForwardOptions",
+ "PodProxyOptions",
+ "NodeProxyOptions",
+ "ServiceProxyOptions",
+)
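// Illustrative sketch, not part of the vendored patch: a test could use
// TestOnlyStaticRESTMapper to resolve kind-to-resource mappings without a live
// API server. Building the mapper from client-go's clientgoscheme.Scheme is an
// assumption for illustration only.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// All built-in types registered in the client-go scheme become mappable.
	mapper := testrestmapper.TestOnlyStaticRESTMapper(clientgoscheme.Scheme)

	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(mapping.Resource) // e.g. apps/v1, Resource=deployments
}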
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 50af8334f..d0aada9dd 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -20,7 +20,7 @@ import (
"bytes"
"errors"
"fmt"
- "math"
+ math "math"
"math/big"
"strconv"
"strings"
@@ -460,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
}
}
-// AsApproximateFloat64 returns a float64 representation of the quantity which may
-// lose precision. If the value of the quantity is outside the range of a float64
-// +Inf/-Inf will be returned.
+// AsApproximateFloat64 returns a float64 representation of the quantity which
+// may lose precision. If precision matters more than performance, see
+// AsFloat64Slow. If the value of the quantity is outside the range of a
+// float64 +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
@@ -480,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
return base * math.Pow10(exponent)
}
+// AsFloat64Slow returns a float64 representation of the quantity. This is
+// more precise than AsApproximateFloat64 but significantly slower. If the
+// value of the quantity is outside the range of a float64 +Inf/-Inf will be
+// returned.
+func (q *Quantity) AsFloat64Slow() float64 {
+ infDec := q.AsDec()
+
+ var absScale int64
+ if infDec.Scale() < 0 {
+ absScale = int64(-infDec.Scale())
+ } else {
+ absScale = int64(infDec.Scale())
+ }
+ pow10AbsScale := big.NewInt(10)
+ pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)
+
+ var resultBigFloat *big.Float
+ if infDec.Scale() < 0 {
+ resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(resultBigInt)
+ } else {
+ pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
+ resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
+ resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
+ }
+
+ result, _ := resultBigFloat.Float64()
+ return result
+}
+
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
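// Illustrative sketch, not part of the vendored patch: the trade-off between
// the existing AsApproximateFloat64 and the new AsFloat64Slow. The sample
// quantity below is an arbitrary choice for illustration.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("300m")  // 0.3 cores, stored as a scaled integer
	fast := q.AsApproximateFloat64() // cheap; may lose precision in the low bits
	slow := q.AsFloat64Slow()        // converts via big decimals; precise but slower
	fmt.Println(fast, slow)
}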
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
index 593d7ba8c..54a2883a3 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -50,7 +50,7 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
}
}
if err := ValidateAnnotationsSize(annotations); err != nil {
- allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB))
+ allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, TotalAnnotationSizeLimitB))
}
return allErrs
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
index e7e5c152d..ec414a84b 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -11,6 +11,7 @@ reviewers:
- luxas
- janetkuo
- justinsb
- - ncdc
- soltysh
- dims
+emeritus_reviewers:
+ - ncdc
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 229ea2c2c..9ee6c0591 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -1355,187 +1355,190 @@ func init() {
}
var fileDescriptor_a8431b6e0aeeb761 = []byte{
- // 2873 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57,
- 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa,
- 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44,
- 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49,
- 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f,
- 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84,
- 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7,
- 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e,
- 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26,
- 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03,
- 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5,
- 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf,
- 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b,
- 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e,
- 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76,
- 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f,
- 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28,
- 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26,
- 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28,
- 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29,
- 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f,
- 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58,
- 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d,
- 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86,
- 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5,
- 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a,
- 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e,
- 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a,
- 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb,
- 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71,
- 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53,
- 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7,
- 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82,
- 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5,
- 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95,
- 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b,
- 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8,
- 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75,
- 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b,
- 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09,
- 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26,
- 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3,
- 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3,
- 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74,
- 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81,
- 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2,
- 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22,
- 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1,
- 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60,
- 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6,
- 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1,
- 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82,
- 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8,
- 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8,
- 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56,
- 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e,
- 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47,
- 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc,
- 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79,
- 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13,
- 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77,
- 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a,
- 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e,
- 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45,
- 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98,
- 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3,
- 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e,
- 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18,
- 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d,
- 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69,
- 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91,
- 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54,
- 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63,
- 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b,
- 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92,
- 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23,
- 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57,
- 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b,
- 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a,
- 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f,
- 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c,
- 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f,
- 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e,
- 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41,
- 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37,
- 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf,
- 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73,
- 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15,
- 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88,
- 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69,
- 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7,
- 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d,
- 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7,
- 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7,
- 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14,
- 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1,
- 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17,
- 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc,
- 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f,
- 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98,
- 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b,
- 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51,
- 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c,
- 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a,
- 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3,
- 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e,
- 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd,
- 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf,
- 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b,
- 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65,
- 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6,
- 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5,
- 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59,
- 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc,
- 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17,
- 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6,
- 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75,
- 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43,
- 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86,
- 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c,
- 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53,
- 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54,
- 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a,
- 0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2,
- 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91,
- 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77,
- 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4,
- 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1,
- 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0,
- 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8,
- 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8,
- 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30,
- 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a,
- 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39,
- 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb,
- 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95,
- 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0,
- 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04,
- 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66,
- 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d,
- 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0,
- 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1,
- 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09,
- 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd,
- 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70,
- 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a,
- 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2,
- 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5,
- 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9,
- 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80,
- 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a,
- 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef,
- 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2,
- 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8,
- 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06,
- 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f,
- 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a,
- 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81,
- 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52,
- 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f,
- 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad,
- 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78,
- 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03,
- 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52,
- 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a,
- 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19,
- 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90,
- 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8,
- 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda,
- 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98,
- 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73,
- 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0,
- 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82,
- 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc,
- 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f,
- 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f,
- 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31,
- 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31,
- 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff,
- 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00,
+ // 2928 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47,
+ 0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e,
+ 0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c,
+ 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 0xdd, 0x93,
+ 0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c,
+ 0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44,
+ 0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb,
+ 0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2,
+ 0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d,
+ 0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3,
+ 0x3d, 0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa,
+ 0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc,
+ 0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda,
+ 0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3,
+ 0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb,
+ 0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf,
+ 0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c,
+ 0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93,
+ 0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4,
+ 0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c,
+ 0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f,
+ 0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc,
+ 0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f,
+ 0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61,
+ 0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62,
+ 0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d,
+ 0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3,
+ 0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6,
+ 0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55,
+ 0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49,
+ 0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33,
+ 0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6,
+ 0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05,
+ 0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea,
+ 0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a,
+ 0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f,
+ 0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0,
+ 0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75,
+ 0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17,
+ 0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c,
+ 0xa0, 0x44, 0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69,
+ 0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5,
+ 0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb,
+ 0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef,
+ 0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42,
+ 0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b,
+ 0xe3, 0x67, 0x53, 0x00, 0xb1, 0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4,
+ 0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc,
+ 0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c,
+ 0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5,
+ 0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f,
+ 0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f,
+ 0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9,
+ 0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80,
+ 0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1,
+ 0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38,
+ 0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e,
+ 0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc,
+ 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79,
+ 0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b,
+ 0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77,
+ 0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5,
+ 0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c,
+ 0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45,
+ 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc,
+ 0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69,
+ 0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e,
+ 0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30,
+ 0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76,
+ 0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a,
+ 0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0,
+ 0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7,
+ 0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d,
+ 0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e,
+ 0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa,
+ 0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f,
+ 0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12,
+ 0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b,
+ 0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d,
+ 0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05,
+ 0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19,
+ 0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10,
+ 0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d,
+ 0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe,
+ 0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2,
+ 0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f,
+ 0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27,
+ 0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb,
+ 0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52,
+ 0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd,
+ 0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb,
+ 0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9,
+ 0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d,
+ 0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c,
+ 0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70,
+ 0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53,
+ 0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a,
+ 0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde,
+ 0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14,
+ 0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7,
+ 0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97,
+ 0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e,
+ 0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60,
+ 0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e,
+ 0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38,
+ 0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe,
+ 0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed,
+ 0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c,
+ 0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf,
+ 0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec,
+ 0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b,
+ 0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2,
+ 0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94,
+ 0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40,
+ 0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8,
+ 0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba,
+ 0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0,
+ 0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e,
+ 0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 0x85, 0xbe, 0x09,
+ 0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f,
+ 0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd,
+ 0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03,
+ 0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53,
+ 0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8,
+ 0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d,
+ 0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03,
+ 0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0,
+ 0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7,
+ 0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b,
+ 0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13,
+ 0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c,
+ 0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b,
+ 0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86,
+ 0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e,
+ 0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18,
+ 0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5,
+ 0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77,
+ 0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f,
+ 0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86,
+ 0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47,
+ 0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32,
+ 0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95,
+ 0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e,
+ 0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30,
+ 0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a,
+ 0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac,
+ 0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63,
+ 0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8,
+ 0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 0x39, 0x51,
+ 0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b,
+ 0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64,
+ 0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b,
+ 0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99,
+ 0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71,
+ 0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e,
+ 0xe7, 0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c,
+ 0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b,
+ 0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84,
+ 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e,
+ 0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4,
+ 0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9,
+ 0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2,
+ 0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa,
+ 0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2,
+ 0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf,
+ 0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03,
+ 0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e,
+ 0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9,
+ 0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c,
+ 0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde,
+ 0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc,
+ 0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2,
+ 0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8,
+ 0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5,
+ 0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed,
+ 0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72,
+ 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb,
+ 0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a,
+ 0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92,
+ 0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e,
+ 0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1,
+ 0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd,
+ 0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00,
}
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@@ -1983,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ i--
+ if *m.IgnoreStoreReadErrorWithClusterBreakingPotential {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
if len(m.DryRun) > 0 {
for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DryRun[iNdEx])
@@ -3773,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ n += 2
+ }
return n
}
@@ -4506,6 +4522,7 @@ func (this *DeleteOptions) String() string {
`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+ `IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`,
`}`,
}, "")
return s
@@ -6456,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
}
m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
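The generated marshal/unmarshal code above encodes the new optional bool as protobuf field 6 with wire type 0 (varint), which is where the literal tag byte 0x30 and the `n += 2` in Size() come from. A standalone sketch of that tag arithmetic (the helper name is illustrative, not part of the generated code):

    package main

    import "fmt"

    // protoTag computes a single-byte protobuf field key: (fieldNumber << 3) | wireType.
    // This only holds for field numbers small enough to fit in one varint byte.
    func protoTag(fieldNumber, wireType int) byte {
        return byte(fieldNumber<<3 | wireType)
    }

    func main() {
        // Field 6 (ignoreStoreReadErrorWithClusterBreakingPotential), wire type 0 (varint).
        fmt.Printf("0x%02x\n", protoTag(6, 0)) // 0x30
        // A set bool therefore costs two bytes on the wire (tag + 0x00/0x01),
        // matching the two bytes added to DeleteOptions.Size above.
    }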
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 18dd0b067..865d3e7ca 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -315,6 +315,21 @@ message DeleteOptions {
// +optional
// +listType=atomic
repeated string dryRun = 5;
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6;
}
// Duration is a wrapper around time.Duration which supports correct
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 473adb9ef..4cf3f4795 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -439,6 +439,20 @@ const (
//
// The annotation is added to a "Bookmark" event.
InitialEventsAnnotationKey = "k8s.io/initial-events-end"
+
+ // InitialEventsListBlueprintAnnotationKey is the name of the key
+ // where an empty, versioned list is encoded in the requested format
+ // (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string.
+ //
+ // This encoding matches the request encoding format, which may be
+ // protobuf, JSON, CBOR, or others, depending on what the client requested.
+ // This ensures that the reconstructed list can be processed through the
+ // same decoder chain that would handle a standard LIST call response.
+ //
+ // The annotation is added to a "Bookmark" event and is used by clients
+ // to guarantee the format consistency when reconstructing
+ // the list during WatchList processing.
+ InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint"
)
// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
@@ -546,6 +560,21 @@ type DeleteOptions struct {
// +optional
// +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ // +optional
+ IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"`
}
const (
@@ -902,6 +931,22 @@ const (
// Status code 500
StatusReasonServerTimeout StatusReason = "ServerTimeout"
+ // StatusReasonStoreReadError means that the server encountered an error while
+ // retrieving resources from the backend object store.
+ // This may be due to backend database error, or because processing of the read
+ // resource failed.
+ // Details:
+ // "kind" string - the kind attribute of the resource being acted on.
+ // "name" string - the prefix where the reading error(s) occurred
+ // "causes" []StatusCause
+ // - (optional):
+ // - "type" CauseType - CauseTypeUnexpectedServerResponse
+ // - "message" string - the error message from the store backend
+ // - "field" string - the full path with the key of the resource that failed reading
+ //
+ // Status code 500
+ StatusReasonStoreReadError StatusReason = "StorageReadError"
+
// StatusReasonTimeout means that the request could not be completed within the given time.
// Clients can get this response only when they specified a timeout param in the request,
// or if the server cannot complete the operation within a reasonable amount of time.
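Taken together with the DeleteOptions field above, a typed client opts in to unsafe deletion simply by setting the pointer, and can distinguish the new failure mode via the status reason. A hedged sketch, assuming a clientset built elsewhere and a server new enough (and configured) to honor the field; the function, namespace, and object name are placeholders:

    package sketch

    import (
        "context"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/utils/ptr"
    )

    // unsafeDelete attempts the unsafe deletion path for an object whose normal
    // deletion keeps failing because the stored data can no longer be read.
    func unsafeDelete(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
        opts := metav1.DeleteOptions{
            // Must not be combined with dryRun, gracePeriodSeconds, preconditions,
            // orphanDependents or propagationPolicy (enforced by the validation below).
            IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
        }
        err := cs.CoreV1().Pods(ns).Delete(ctx, name, opts)
        if err != nil && apierrors.ReasonForError(err) == metav1.StatusReasonStoreReadError {
            // The server reported the new StorageReadError reason: even the unsafe
            // path could not read the object back from storage.
        }
        return err
    }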
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index 1fa37215c..405496d3d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -129,6 +129,7 @@ var map_DeleteOptions = map[string]string{
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
"propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it",
}
func (DeleteOptions) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
index 0f58d66c0..71f7b163a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -20,6 +20,7 @@ import (
gojson "encoding/json"
"fmt"
"io"
+ "math/big"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err
return i, true, nil
}
+// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a
+// float64, it is returned. If the field's value is an int64 that can be losslessly converted to
+// float64, it will be converted and returned. Returns false if value is not found and an error if
+// not a float64 or an int64 that can be accurately represented as a float64.
+func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+ val, found, err := NestedFieldNoCopy(obj, fields...)
+ if !found || err != nil {
+ return 0, found, err
+ }
+ switch x := val.(type) {
+ case int64:
+ f, accuracy := big.NewInt(x).Float64()
+ if accuracy != big.Exact {
+ return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x)
+ }
+ return f, true, nil
+ case float64:
+ return x, true, nil
+ default:
+ return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val)
+ }
+}
+
// NestedStringSlice returns a copy of []string value of a nested field.
// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
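NestedNumberAsFloat64 relies on math/big to detect int64 values that cannot round-trip through float64 (anything requiring more than 53 significand bits). A small standalone illustration of that exactness check (the helper name is invented for the example):

    package main

    import (
        "fmt"
        "math/big"
    )

    // losslessFloat64 reports whether x converts to float64 without rounding,
    // mirroring the accuracy check used by NestedNumberAsFloat64.
    func losslessFloat64(x int64) (float64, bool) {
        f, accuracy := big.NewInt(x).Float64()
        return f, accuracy == big.Exact
    }

    func main() {
        fmt.Println(losslessFloat64(1 << 53))   // 9.007199254740992e+15 true
        fmt.Println(losslessFloat64(1<<53 + 1)) // 9.007199254740992e+15 false (rounded)
    }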
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
index 40d289f37..5e36a91ee 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) {
}
func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
- items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+ v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields")
if !found || err != nil {
return nil
}
+ items, ok := v.([]interface{})
+ if !ok {
+ return nil
+ }
managedFields := []metav1.ManagedFieldsEntry{}
for _, item := range items {
m, ok := item.(map[string]interface{})
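The switch from NestedSlice to NestedFieldNoCopy avoids deep-copying the raw managedFields value before GetManagedFields builds its own typed copy anyway; the added type assertion replaces the slice check NestedSlice used to perform. A small sketch of the difference between the two accessors (the object literal is a made-up example):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        obj := map[string]interface{}{
            "metadata": map[string]interface{}{
                "managedFields": []interface{}{map[string]interface{}{"manager": "kubectl"}},
            },
        }
        // NestedSlice returns a deep copy; NestedFieldNoCopy returns the live value.
        copied, _, _ := unstructured.NestedSlice(obj, "metadata", "managedFields")
        live, _, _ := unstructured.NestedFieldNoCopy(obj, "metadata", "managedFields")
        copied[0].(map[string]interface{})["manager"] = "changed"
        fmt.Println(live.([]interface{})[0].(map[string]interface{})["manager"]) // still "kubectl"
    }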
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
index 3eba5ba54..b1eb1bbfc 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -26,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
+
+ "k8s.io/utils/ptr"
)
// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options
@@ -165,6 +167,7 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
}
allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+ allErrs = append(allErrs, ValidateIgnoreStoreReadError(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), options)...)
return allErrs
}
@@ -186,15 +189,16 @@ func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList {
allErrs := field.ErrorList{}
- if patchType != types.ApplyPatchType {
- if options.Force != nil {
- allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
- }
- } else {
+ switch patchType {
+ case types.ApplyYAMLPatchType, types.ApplyCBORPatchType:
if options.FieldManager == "" {
// This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers.
allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch"))
}
+ default:
+ if options.Force != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
+ }
}
allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
@@ -212,7 +216,7 @@ func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorL
// considered as not set and is defaulted by the rest of the process
// (unless apply is used, in which case it is required).
if len(fieldManager) > FieldManagerMaxLength {
- allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength))
+ allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength))
}
// Verify that all characters are printable.
for i, r := range fieldManager {
@@ -277,7 +281,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...)
if len(fields.Subresource) > MaxSubresourceNameLength {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), "" /*unused*/, MaxSubresourceNameLength))
}
}
return allErrs
@@ -334,12 +338,12 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er
allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr))
}
if len(condition.Reason) > maxReasonLen {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen))
}
}
if len(condition.Message) > maxMessageLen {
- allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen))
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen))
}
return allErrs
@@ -357,3 +361,31 @@ func isValidConditionReason(value string) []string {
}
return nil
}
+
+// ValidateIgnoreStoreReadError validates that delete options are valid when
+// ignoreStoreReadErrorWithClusterBreakingPotential is enabled
+func ValidateIgnoreStoreReadError(fldPath *field.Path, options *metav1.DeleteOptions) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if enabled := ptr.Deref[bool](options.IgnoreStoreReadErrorWithClusterBreakingPotential, false); !enabled {
+ return allErrs
+ }
+
+ if len(options.DryRun) > 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .dryRun"))
+ }
+ if options.PropagationPolicy != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .propagationPolicy"))
+ }
+ //nolint:staticcheck // Keep validation for deprecated OrphanDependents option until it's being removed
+ if options.OrphanDependents != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .orphanDependents"))
+ }
+ if options.GracePeriodSeconds != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .gracePeriodSeconds"))
+ }
+ if options.Preconditions != nil {
+ allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .preconditions"))
+ }
+
+ return allErrs
+}
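ValidateIgnoreStoreReadError only engages when the pointer dereferences to true, and then rejects any other delete semantics on the same request. A quick sketch of what a caller sees for one such invalid combination:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
        "k8s.io/utils/ptr"
    )

    func main() {
        opts := &metav1.DeleteOptions{
            IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
            DryRun: []string{metav1.DryRunAll},
        }
        errs := validation.ValidateIgnoreStoreReadError(
            field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), opts)
        fmt.Println(len(errs)) // 1: cannot be set together with .dryRun
    }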
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
index afe01ed5a..82e272240 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio
} else {
out.DryRun = nil
}
+ if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 {
+ if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil {
+ return err
+ }
+ } else {
+ out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
+ }
return nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index 90cc54a7e..6b0d0dfee 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+ in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential
+ *out = new(bool)
+ **out = **in
+ }
return
}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 9e22a0056..fafa81a3d 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -18,6 +18,7 @@ package labels
import (
"fmt"
+ "slices"
"sort"
"strconv"
"strings"
@@ -27,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
- stringslices "k8s.io/utils/strings/slices"
)
var (
@@ -313,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool {
if r.operator != x.operator {
return false
}
- return stringslices.Equal(r.strValues, x.strValues)
+ return slices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
index cc0a77bba..395dfdbd0 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error {
func (e *encoderWithAllocator) Identifier() Identifier {
return e.encoder.Identifier()
}
+
+type nondeterministicEncoderToEncoderAdapter struct {
+ NondeterministicEncoder
+}
+
+func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error {
+ return e.EncodeNondeterministic(obj, w)
+}
+
+// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's
+// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the
+// provided Encoder as-is.
+func UseNondeterministicEncoding(encoder Encoder) Encoder {
+ if nondeterministic, ok := encoder.(NondeterministicEncoder); ok {
+ return nondeterministicEncoderToEncoderAdapter{nondeterministic}
+ }
+ return encoder
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index e89ea8939..2703300cd 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -69,6 +69,19 @@ type Encoder interface {
Identifier() Identifier
}
+// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in
+// cases where the output does not need to be deterministic.
+type NondeterministicEncoder interface {
+ Encoder
+
+ // EncodeNondeterministic writes an object to the stream. Unlike the Encode method of
+ // Encoder, EncodeNondeterministic does not guarantee that any two invocations will write
+ // the same sequence of bytes to the io.Writer. Any differences will not be significant to a
+ // generic decoder. For example, map entries and struct fields might be encoded in any
+ // order.
+ EncodeNondeterministic(Object, io.Writer) error
+}
+
// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
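UseNondeterministicEncoding (helper.go above) and the NondeterministicEncoder interface are two halves of one mechanism: wrap an encoder once, and existing call sites keep using Encode while map and struct ordering is allowed to vary. A sketch assuming the CBOR serializer introduced below as the concrete implementation; the function name and scheme wiring are illustrative:

    package sketch

    import (
        "bytes"

        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
    )

    func encodeFast(scheme *runtime.Scheme, obj runtime.Object) ([]byte, error) {
        var enc runtime.Encoder = cbor.NewSerializer(scheme, scheme)
        // The CBOR serializer implements NondeterministicEncoder, so the wrapper
        // routes Encode to EncodeNondeterministic; other encoders pass through as-is.
        enc = runtime.UseNondeterministicEncoding(enc)
        var buf bytes.Buffer
        if err := enc.Encode(obj, &buf); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil
    }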
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
new file mode 100644
index 000000000..4d069a903
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
@@ -0,0 +1,389 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
+ "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+ util "k8s.io/apimachinery/pkg/util/runtime"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+type metaFactory interface {
+ // Interpret should return the version and kind of the wire-format of the object.
+ Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+type defaultMetaFactory struct{}
+
+func (mf *defaultMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+ var tm metav1.TypeMeta
+ // The input is expected to include additional map keys besides apiVersion and kind, so use
+ // lax mode for decoding into TypeMeta.
+ if err := modes.DecodeLax.Unmarshal(data, &tm); err != nil {
+ return nil, fmt.Errorf("unable to determine group/version/kind: %w", err)
+ }
+ actual := tm.GetObjectKind().GroupVersionKind()
+ return &actual, nil
+}
+
+type Serializer interface {
+ runtime.Serializer
+ runtime.NondeterministicEncoder
+ recognizer.RecognizingDecoder
+
+ // NewSerializer returns a value of this interface type rather than exporting the serializer
+ // type and returning one of those because the zero value of serializer isn't ready to
+ // use. Users aren't intended to implement cbor.Serializer themselves, and this unexported
+ // interface method is here to prevent that (https://go.dev/blog/module-compatibility).
+ private()
+}
+
+var _ Serializer = &serializer{}
+
+type options struct {
+ strict bool
+ transcode bool
+}
+
+type Option func(*options)
+
+// Strict configures a serializer to return a strict decoding error when it encounters map keys that
+// do not correspond to a field in the target object of a decode operation. This option is disabled
+// by default.
+func Strict(s bool) Option {
+ return func(opts *options) {
+ opts.strict = s
+ }
+}
+
+// Transcode configures a serializer to transcode the "raw" bytes of a decoded runtime.RawExtension
+// or metav1.FieldsV1 object to JSON. This is enabled by default to support existing programs that
+// depend on the assumption that objects of either type contain valid JSON.
+func Transcode(s bool) Option {
+ return func(opts *options) {
+ opts.transcode = s
+ }
+}
+
+type serializer struct {
+ metaFactory metaFactory
+ creater runtime.ObjectCreater
+ typer runtime.ObjectTyper
+ options options
+}
+
+func (serializer) private() {}
+
+// NewSerializer creates and returns a serializer configured with the provided options. The default
+// options are equivalent to explicitly passing Strict(false) and Transcode(true).
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) Serializer {
+ return newSerializer(&defaultMetaFactory{}, creater, typer, options...)
+}
+
+func newSerializer(metaFactory metaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) *serializer {
+ s := &serializer{
+ metaFactory: metaFactory,
+ creater: creater,
+ typer: typer,
+ }
+ s.options.transcode = true
+ for _, o := range options {
+ o(&s.options)
+ }
+ return s
+}
+
+func (s *serializer) Identifier() runtime.Identifier {
+ return "cbor"
+}
+
+// Encode writes a CBOR representation of the given object.
+//
+// Because the CBOR data item written by a call to Encode is always enclosed in the "self-described
+// CBOR" tag, its encoded form always has the prefix 0xd9d9f7. This prefix is suitable for use as a
+// "magic number" for distinguishing encoded CBOR from other protocols.
+//
+// The default serialization behavior for any given object replicates the behavior of the JSON
+// serializer as far as it is necessary to allow the CBOR serializer to be used as a drop-in
+// replacement for the JSON serializer, with limited exceptions. For example, the distinction
+// between integers and floating-point numbers is preserved in CBOR due to its distinct
+// representations for each type.
+//
+// Objects implementing runtime.Unstructured will have their unstructured content encoded rather
+// than following the default behavior for their dynamic type.
+func (s *serializer) Encode(obj runtime.Object, w io.Writer) error {
+ return s.encode(modes.Encode, obj, w)
+}
+
+func (s *serializer) EncodeNondeterministic(obj runtime.Object, w io.Writer) error {
+ return s.encode(modes.EncodeNondeterministic, obj, w)
+}
+
+func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer) error {
+ var v interface{} = obj
+ if u, ok := obj.(runtime.Unstructured); ok {
+ v = u.UnstructuredContent()
+ }
+
+ if err := modes.RejectCustomMarshalers(v); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(selfDescribedCBOR); err != nil {
+ return err
+ }
+
+ return mode.MarshalTo(v, w)
+}
+
+// gvkWithDefaults returns group kind and version defaulting from provided default
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+ if len(actual.Kind) == 0 {
+ actual.Kind = defaultGVK.Kind
+ }
+ if len(actual.Version) == 0 && len(actual.Group) == 0 {
+ actual.Group = defaultGVK.Group
+ actual.Version = defaultGVK.Version
+ }
+ if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+ actual.Version = defaultGVK.Version
+ }
+ return actual
+}
+
+// diagnose returns the diagnostic encoding of a well-formed CBOR data item.
+func diagnose(data []byte) string {
+ diag, err := modes.Diagnostic.Diagnose(data)
+ if err != nil {
+ // Since the input must already be well-formed CBOR, converting it to diagnostic
+ // notation should not fail.
+ util.HandleError(err)
+
+ return hex.EncodeToString(data)
+ }
+ return diag
+}
+
+// unmarshal unmarshals CBOR data from the provided byte slice into a Go object. If the decoder is
+// configured to report strict errors, the first error return value may be a non-nil strict decoding
+// error. If the last error return value is non-nil, then the unmarshal failed entirely and the
+// state of the destination object should not be relied on.
+func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error) {
+ if u, ok := into.(runtime.Unstructured); ok {
+ var content map[string]interface{}
+ defer func() {
+ switch u := u.(type) {
+ case *unstructured.UnstructuredList:
+ // UnstructuredList's implementation of SetUnstructuredContent
+ // produces different objects than those produced by a decode using
+ // UnstructuredJSONScheme:
+ //
+ // 1. SetUnstructuredContent retains the "items" key in the list's
+ // Object field. It is omitted from Object when decoding with
+ // UnstructuredJSONScheme.
+ // 2. SetUnstructuredContent does not populate "apiVersion" and
+ // "kind" on each entry of its Items
+ // field. UnstructuredJSONScheme does, inferring the singular
+ // Kind from the list Kind.
+ // 3. SetUnstructuredContent ignores entries of "items" that are
+ // not JSON objects or are objects without
+ // "kind". UnstructuredJSONScheme returns an error in either
+ // case.
+ //
+ // UnstructuredJSONScheme's behavior is replicated here.
+ var items []interface{}
+ if uncast, present := content["items"]; present {
+ var cast bool
+ items, cast = uncast.([]interface{})
+ if !cast {
+ strict, lax = nil, fmt.Errorf("items field of UnstructuredList must be encoded as an array or null if present")
+ return
+ }
+ }
+ apiVersion, _ := content["apiVersion"].(string)
+ kind, _ := content["kind"].(string)
+ kind = strings.TrimSuffix(kind, "List")
+ var unstructureds []unstructured.Unstructured
+ if len(items) > 0 {
+ unstructureds = make([]unstructured.Unstructured, len(items))
+ }
+ for i := range items {
+ object, cast := items[i].(map[string]interface{})
+ if !cast {
+ strict, lax = nil, fmt.Errorf("elements of the items field of UnstructuredList must be encoded as a map")
+ return
+ }
+
+ // As in UnstructuredJSONScheme, only set the heuristic
+ // singular GVK when both "apiVersion" and "kind" are either
+ // missing, non-string, or empty.
+ object["apiVersion"], _ = object["apiVersion"].(string)
+ object["kind"], _ = object["kind"].(string)
+ if object["apiVersion"] == "" && object["kind"] == "" {
+ object["apiVersion"] = apiVersion
+ object["kind"] = kind
+ }
+
+ if object["kind"] == "" {
+ strict, lax = nil, runtime.NewMissingKindErr(diagnose(data))
+ return
+ }
+ if object["apiVersion"] == "" {
+ strict, lax = nil, runtime.NewMissingVersionErr(diagnose(data))
+ return
+ }
+
+ unstructureds[i].Object = object
+ }
+ delete(content, "items")
+ u.Object = content
+ u.Items = unstructureds
+ default:
+ u.SetUnstructuredContent(content)
+ }
+ }()
+ into = &content
+ } else if err := modes.RejectCustomMarshalers(into); err != nil {
+ return nil, err
+ }
+
+ if !s.options.strict {
+ return nil, modes.DecodeLax.Unmarshal(data, into)
+ }
+
+ err := modes.Decode.Unmarshal(data, into)
+ // TODO: UnknownFieldError is ambiguous. It only provides the index of the first problematic
+ // map entry encountered and does not indicate which map the index refers to.
+ var unknownField *cbor.UnknownFieldError
+ if errors.As(err, &unknownField) {
+ // Unlike JSON, there are no strict errors in CBOR for duplicate map keys. CBOR maps
+ // with duplicate keys are considered invalid according to the spec and are rejected
+ // entirely.
+ return runtime.NewStrictDecodingError([]error{unknownField}), modes.DecodeLax.Unmarshal(data, into)
+ }
+ return nil, err
+}
+
+func (s *serializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+ // A preliminary pass over the input to obtain the actual GVK is redundant on a successful
+ // decode into Unstructured.
+ if _, ok := into.(runtime.Unstructured); ok {
+ if _, unmarshalErr := s.unmarshal(data, into); unmarshalErr != nil {
+ actual, interpretErr := s.metaFactory.Interpret(data)
+ if interpretErr != nil {
+ return nil, nil, interpretErr
+ }
+
+ if gvk != nil {
+ *actual = gvkWithDefaults(*actual, *gvk)
+ }
+
+ return nil, actual, unmarshalErr
+ }
+
+ actual := into.GetObjectKind().GroupVersionKind()
+ if len(actual.Kind) == 0 {
+ return nil, &actual, runtime.NewMissingKindErr(diagnose(data))
+ }
+ if len(actual.Version) == 0 {
+ return nil, &actual, runtime.NewMissingVersionErr(diagnose(data))
+ }
+
+ return into, &actual, nil
+ }
+
+ actual, err := s.metaFactory.Interpret(data)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if gvk != nil {
+ *actual = gvkWithDefaults(*actual, *gvk)
+ }
+
+ if into != nil {
+ types, _, err := s.typer.ObjectKinds(into)
+ if err != nil {
+ return nil, actual, err
+ }
+ *actual = gvkWithDefaults(*actual, types[0])
+ }
+
+ if len(actual.Kind) == 0 {
+ return nil, actual, runtime.NewMissingKindErr(diagnose(data))
+ }
+ if len(actual.Version) == 0 {
+ return nil, actual, runtime.NewMissingVersionErr(diagnose(data))
+ }
+
+ obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ strict, err := s.unmarshal(data, obj)
+ if err != nil {
+ return nil, actual, err
+ }
+
+ if s.options.transcode {
+ if err := transcodeRawTypes(obj); err != nil {
+ return nil, actual, err
+ }
+ }
+
+ return obj, actual, strict
+}
+
+// selfDescribedCBOR is the CBOR encoding of the head of tag number 55799. This tag, specified in
+// RFC 8949 Section 3.4.6 "Self-Described CBOR", encloses all output from the encoder, has no
+// special semantics, and is used as a magic number to recognize CBOR-encoded data items.
+//
+// See https://www.rfc-editor.org/rfc/rfc8949.html#name-self-described-cbor.
+var selfDescribedCBOR = []byte{0xd9, 0xd9, 0xf7}
+
+func (s *serializer) RecognizesData(data []byte) (ok, unknown bool, err error) {
+ return bytes.HasPrefix(data, selfDescribedCBOR), false, nil
+}
+
+// NewSerializerInfo returns a default SerializerInfo for CBOR using the given creater and typer.
+func NewSerializerInfo(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo {
+ return runtime.SerializerInfo{
+ MediaType: "application/cbor",
+ MediaTypeType: "application",
+ MediaTypeSubType: "cbor",
+ Serializer: NewSerializer(creater, typer),
+ StrictSerializer: NewSerializer(creater, typer, Strict(true)),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ Framer: NewFramer(),
+ Serializer: NewSerializer(creater, typer, Transcode(false)),
+ },
+ }
+}
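Every Encode call above prepends the three-byte self-described CBOR tag, and RecognizesData keys off exactly that prefix, so a round trip through the serializer is easy to observe. A sketch using an unstructured object (the ConfigMap-shaped map is just a placeholder payload):

    package main

    import (
        "bytes"
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
    )

    func main() {
        scheme := runtime.NewScheme()
        s := cbor.NewSerializer(scheme, scheme)

        u := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "ConfigMap",
            "metadata":   map[string]interface{}{"name": "demo"},
        }}

        var buf bytes.Buffer
        if err := s.Encode(u, &buf); err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", buf.Bytes()[:3]) // d9 d9 f7, the self-described CBOR prefix

        ok, _, _ := s.RecognizesData(buf.Bytes())
        fmt.Println(ok) // true

        out := &unstructured.Unstructured{}
        if _, _, err := s.Decode(buf.Bytes(), nil, out); err != nil {
            panic(err)
        }
        fmt.Println(out.GetKind()) // ConfigMap
    }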
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
index cd78b1df2..a71a487f9 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
@@ -23,14 +23,39 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
)
+// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
+// make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
func Marshal(src interface{}) ([]byte, error) {
+ if err := modes.RejectCustomMarshalers(src); err != nil {
+ return nil, err
+ }
return modes.Encode.Marshal(src)
}
+// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
+// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
func Unmarshal(src []byte, dst interface{}) error {
+ if err := modes.RejectCustomMarshalers(dst); err != nil {
+ return err
+ }
return modes.Decode.Unmarshal(src, dst)
}
+// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in
+// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to
+// be parsed.
func Diagnose(src []byte) (string, error) {
return modes.Diagnostic.Diagnose(src)
}
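The direct package exposes the shared encode/decode modes without the runtime.Serializer plumbing; the new doc comments mainly warn about the custom-marshaler restriction. A tiny sketch of Marshal plus Diagnose (the value is arbitrary, and the exact diagnostic spacing may differ):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    )

    func main() {
        b, err := direct.Marshal(map[string]int64{"replicas": 3})
        if err != nil {
            panic(err) // would be non-nil if the value had JSON/text custom marshalers
        }
        diag, err := direct.Diagnose(b)
        if err != nil {
            panic(err)
        }
        fmt.Println(diag) // RFC 8949 diagnostic notation, roughly {"replicas": 3}
    }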
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
new file mode 100644
index 000000000..28a733c67
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "io"
+
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// NewFramer returns a runtime.Framer based on RFC 8742 CBOR Sequences. Each frame contains exactly
+// one encoded CBOR data item.
+func NewFramer() runtime.Framer {
+ return framer{}
+}
+
+var _ runtime.Framer = framer{}
+
+type framer struct{}
+
+func (framer) NewFrameReader(rc io.ReadCloser) io.ReadCloser {
+ return &frameReader{
+ decoder: cbor.NewDecoder(rc),
+ closer: rc,
+ }
+}
+
+func (framer) NewFrameWriter(w io.Writer) io.Writer {
+ // Each data item in a CBOR sequence is self-delimiting (like JSON objects).
+ return w
+}
+
+type frameReader struct {
+ decoder *cbor.Decoder
+ closer io.Closer
+
+ overflow []byte
+}
+
+func (fr *frameReader) Read(dst []byte) (int, error) {
+ if len(fr.overflow) > 0 {
+ // We read a frame that was too large for the destination slice in a previous call
+ // to Read and have bytes left over.
+ n := copy(dst, fr.overflow)
+ if n < len(fr.overflow) {
+ fr.overflow = fr.overflow[n:]
+ return n, io.ErrShortBuffer
+ }
+ fr.overflow = nil
+ return n, nil
+ }
+
+ // The Reader contract allows implementations to use all of dst[0:len(dst)] as scratch
+ // space, even if n < len(dst), but it does not allow implementations to use
+ // dst[len(dst):cap(dst)]. Slicing it up-front allows us to append to it without worrying
+ // about overwriting dst[len(dst):cap(dst)].
+ m := cbor.RawMessage(dst[0:0:len(dst)])
+ if err := fr.decoder.Decode(&m); err != nil {
+ return 0, err
+ }
+
+ if len(m) > len(dst) {
+ // The frame was too big, m has a newly-allocated underlying array to accommodate
+ // it.
+ fr.overflow = m[len(dst):]
+ return copy(dst, m), io.ErrShortBuffer
+ }
+
+ return len(m), nil
+}
+
+func (fr *frameReader) Close() error {
+ return fr.closer.Close()
+}
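The framer splits an RFC 8742 CBOR Sequence into one frame per data item, spilling into the overflow buffer only when a frame exceeds the caller's slice. A sketch that feeds it two concatenated items (the payload maps are arbitrary):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
        "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
    )

    func main() {
        // Build a CBOR sequence: data items written back to back with no delimiter.
        var seq bytes.Buffer
        for _, v := range []interface{}{map[string]string{"a": "1"}, map[string]string{"b": "2"}} {
            b, err := direct.Marshal(v)
            if err != nil {
                panic(err)
            }
            seq.Write(b)
        }

        fr := cbor.NewFramer().NewFrameReader(io.NopCloser(&seq))
        defer fr.Close()
        buf := make([]byte, 64) // large enough here; a short buffer would yield io.ErrShortBuffer
        for {
            n, err := fr.Read(buf)
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Printf("frame of %d bytes\n", n)
        }
    }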
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
index c66931384..5fae14151 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
@@ -105,7 +105,7 @@ var Encode = EncMode{
var EncodeNondeterministic = EncMode{
delegate: func() cbor.UserBufferEncMode {
opts := Encode.options()
- opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0.
+ opts.Sort = cbor.SortFastShuffle
em, err := opts.UserBufferEncMode()
if err != nil {
panic(err)
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
new file mode 100644
index 000000000..09d1340f9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var sharedTranscoders transcoders
+
+var rawTypeTranscodeFuncs = map[reflect.Type]func(reflect.Value) error{
+ reflect.TypeFor[runtime.RawExtension](): func(rv reflect.Value) error {
+ if !rv.CanAddr() {
+ return nil
+ }
+ re := rv.Addr().Interface().(*runtime.RawExtension)
+ if re.Raw == nil {
+ // When Raw is nil it encodes to null. Don't change nil Raw values during
+ // transcoding, they would have unmarshalled from JSON as nil too.
+ return nil
+ }
+ j, err := re.MarshalJSON()
+ if err != nil {
+ return fmt.Errorf("failed to transcode RawExtension to JSON: %w", err)
+ }
+ re.Raw = j
+ return nil
+ },
+ reflect.TypeFor[metav1.FieldsV1](): func(rv reflect.Value) error {
+ if !rv.CanAddr() {
+ return nil
+ }
+ fields := rv.Addr().Interface().(*metav1.FieldsV1)
+ if fields.Raw == nil {
+ // When Raw is nil it encodes to null. Don't change nil Raw values during
+ // transcoding, they would have unmarshalled from JSON as nil too.
+ return nil
+ }
+ j, err := fields.MarshalJSON()
+ if err != nil {
+ return fmt.Errorf("failed to transcode FieldsV1 to JSON: %w", err)
+ }
+ fields.Raw = j
+ return nil
+ },
+}
+
+func transcodeRawTypes(v interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ rv := reflect.ValueOf(v)
+ return sharedTranscoders.getTranscoder(rv.Type()).fn(rv)
+}
+
+type transcoder struct {
+ fn func(rv reflect.Value) error
+}
+
+var noop = transcoder{
+ fn: func(reflect.Value) error {
+ return nil
+ },
+}
+
+type transcoders struct {
+ lock sync.RWMutex
+ m map[reflect.Type]**transcoder
+}
+
+func (ts *transcoders) getTranscoder(rt reflect.Type) transcoder {
+ ts.lock.RLock()
+ tpp, ok := ts.m[rt]
+ ts.lock.RUnlock()
+ if ok {
+ return **tpp
+ }
+
+ ts.lock.Lock()
+ defer ts.lock.Unlock()
+ tp := ts.getTranscoderLocked(rt)
+ return *tp
+}
+
+func (ts *transcoders) getTranscoderLocked(rt reflect.Type) *transcoder {
+ if tpp, ok := ts.m[rt]; ok {
+ // A transcoder for this type was cached while waiting to acquire the lock.
+ return *tpp
+ }
+
+ // Cache the transcoder now, before populating fn, so that circular references between types
+ // don't overflow the call stack.
+ t := new(transcoder)
+ if ts.m == nil {
+ ts.m = make(map[reflect.Type]**transcoder)
+ }
+ ts.m[rt] = &t
+
+ for rawType, fn := range rawTypeTranscodeFuncs {
+ if rt == rawType {
+ t = &transcoder{fn: fn}
+ return t
+ }
+ }
+
+ switch rt.Kind() {
+ case reflect.Array:
+ te := ts.getTranscoderLocked(rt.Elem())
+ rtlen := rt.Len()
+ if rtlen == 0 || te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for i := 0; i < rtlen; i++ {
+ if err := te.fn(rv.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Interface:
+ // Any interface value might have a dynamic type involving RawExtension. It needs to
+ // be checked.
+ t.fn = func(rv reflect.Value) error {
+ if rv.IsNil() {
+ return nil
+ }
+ rv = rv.Elem()
+ // The interface element's type is dynamic so its transcoder can't be
+ // determined statically.
+ return ts.getTranscoder(rv.Type()).fn(rv)
+ }
+ case reflect.Map:
+ rtk := rt.Key()
+ tk := ts.getTranscoderLocked(rtk)
+ rte := rt.Elem()
+ te := ts.getTranscoderLocked(rte)
+ if tk == &noop && te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ iter := rv.MapRange()
+ rvk := reflect.New(rtk).Elem()
+ rve := reflect.New(rte).Elem()
+ for iter.Next() {
+ rvk.SetIterKey(iter)
+ if err := tk.fn(rvk); err != nil {
+ return err
+ }
+ rve.SetIterValue(iter)
+ if err := te.fn(rve); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Pointer:
+ te := ts.getTranscoderLocked(rt.Elem())
+ if te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ if rv.IsNil() {
+ return nil
+ }
+ return te.fn(rv.Elem())
+ }
+ case reflect.Slice:
+ te := ts.getTranscoderLocked(rt.Elem())
+ if te == &noop {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for i := 0; i < rv.Len(); i++ {
+ if err := te.fn(rv.Index(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ case reflect.Struct:
+ type fieldTranscoder struct {
+ Index int
+ Transcoder *transcoder
+ }
+ var fieldTranscoders []fieldTranscoder
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ tf := ts.getTranscoderLocked(f.Type)
+ if tf == &noop {
+ continue
+ }
+ fieldTranscoders = append(fieldTranscoders, fieldTranscoder{Index: i, Transcoder: tf})
+ }
+ if len(fieldTranscoders) == 0 {
+ t = &noop
+ break
+ }
+ t.fn = func(rv reflect.Value) error {
+ for _, ft := range fieldTranscoders {
+ if err := ft.Transcoder.fn(rv.Field(ft.Index)); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ default:
+ t = &noop
+ }
+
+ return t
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index ff9820842..77bb30745 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -17,9 +17,6 @@ limitations under the License.
package serializer
import (
- "mime"
- "strings"
-
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
@@ -28,41 +25,26 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
- AcceptContentTypes []string
- ContentType string
- FileExtensions []string
- // EncodesAsText should be true if this content type can be represented safely in UTF-8
- EncodesAsText bool
-
- Serializer runtime.Serializer
- PrettySerializer runtime.Serializer
- StrictSerializer runtime.Serializer
-
- AcceptStreamContentTypes []string
- StreamContentType string
-
- Framer runtime.Framer
- StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo {
jsonSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
)
- jsonSerializerType := serializerType{
- AcceptContentTypes: []string{runtime.ContentTypeJSON},
- ContentType: runtime.ContentTypeJSON,
- FileExtensions: []string{"json"},
- EncodesAsText: true,
- Serializer: jsonSerializer,
-
- Framer: json.Framer,
- StreamSerializer: jsonSerializer,
+ jsonSerializerType := runtime.SerializerInfo{
+ MediaType: runtime.ContentTypeJSON,
+ MediaTypeType: "application",
+ MediaTypeSubType: "json",
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ StrictSerializer: json.NewSerializerWithOptions(
+ mf, scheme, scheme,
+ json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
+ ),
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ EncodesAsText: true,
+ Serializer: jsonSerializer,
+ Framer: json.Framer,
+ },
}
if options.Pretty {
jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
@@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
)
}
- strictJSONSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
- )
- jsonSerializerType.StrictSerializer = strictJSONSerializer
-
yamlSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
@@ -88,35 +64,35 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
protoSerializer := protobuf.NewSerializer(scheme, scheme)
protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
- serializers := []serializerType{
+ serializers := []runtime.SerializerInfo{
jsonSerializerType,
{
- AcceptContentTypes: []string{runtime.ContentTypeYAML},
- ContentType: runtime.ContentTypeYAML,
- FileExtensions: []string{"yaml"},
- EncodesAsText: true,
- Serializer: yamlSerializer,
- StrictSerializer: strictYAMLSerializer,
+ MediaType: runtime.ContentTypeYAML,
+ MediaTypeType: "application",
+ MediaTypeSubType: "yaml",
+ EncodesAsText: true,
+ Serializer: yamlSerializer,
+ StrictSerializer: strictYAMLSerializer,
},
{
- AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
- ContentType: runtime.ContentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: protoSerializer,
+ MediaType: runtime.ContentTypeProtobuf,
+ MediaTypeType: "application",
+ MediaTypeSubType: "vnd.kubernetes.protobuf",
+ Serializer: protoSerializer,
// note, strict decoding is unsupported for protobuf,
// fall back to regular serializing
StrictSerializer: protoSerializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: protoRawSerializer,
+ StreamSerializer: &runtime.StreamSerializerInfo{
+ Serializer: protoRawSerializer,
+ Framer: protobuf.LengthDelimitedFramer,
+ },
},
}
- for _, fn := range serializerExtensions {
- if serializer, ok := fn(scheme); ok {
- serializers = append(serializers, serializer)
- }
+ for _, f := range options.serializers {
+ serializers = append(serializers, f(scheme, scheme))
}
+
return serializers
}
@@ -136,6 +112,8 @@ type CodecFactoryOptions struct {
Strict bool
// Pretty includes a pretty serializer along with the non-pretty one
Pretty bool
+
+ serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo
}
// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
@@ -162,6 +140,13 @@ func DisableStrict(options *CodecFactoryOptions) {
options.Strict = false
}
+// WithSerializer configures a serializer to be supported in addition to the default serializers.
+func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator {
+ return func(options *CodecFactoryOptions) {
+ options.serializers = append(options.serializers, f)
+ }
+}
+
// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
// and conversion wrappers to define preferred internal and external versions. In the future,
// as the internal version is used less, callers may instead use a defaulting serializer and
@@ -184,7 +169,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta
}
// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory {
decoders := make([]runtime.Decoder, 0, len(serializers))
var accepts []runtime.SerializerInfo
alreadyAccepted := make(map[string]struct{})
@@ -192,38 +177,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
var legacySerializer runtime.Serializer
for _, d := range serializers {
decoders = append(decoders, d.Serializer)
- for _, mediaType := range d.AcceptContentTypes {
- if _, ok := alreadyAccepted[mediaType]; ok {
- continue
- }
- alreadyAccepted[mediaType] = struct{}{}
- info := runtime.SerializerInfo{
- MediaType: d.ContentType,
- EncodesAsText: d.EncodesAsText,
- Serializer: d.Serializer,
- PrettySerializer: d.PrettySerializer,
- StrictSerializer: d.StrictSerializer,
- }
-
- mediaType, _, err := mime.ParseMediaType(info.MediaType)
- if err != nil {
- panic(err)
- }
- parts := strings.SplitN(mediaType, "/", 2)
- info.MediaTypeType = parts[0]
- info.MediaTypeSubType = parts[1]
-
- if d.StreamSerializer != nil {
- info.StreamSerializer = &runtime.StreamSerializerInfo{
- Serializer: d.StreamSerializer,
- EncodesAsText: d.EncodesAsText,
- Framer: d.Framer,
- }
- }
- accepts = append(accepts, info)
- if mediaType == runtime.ContentTypeJSON {
- legacySerializer = d.Serializer
- }
+ if _, ok := alreadyAccepted[d.MediaType]; ok {
+ continue
+ }
+ alreadyAccepted[d.MediaType] = struct{}{}
+
+ acceptedSerializerShallowCopy := d
+ if d.StreamSerializer != nil {
+ cloned := *d.StreamSerializer
+ acceptedSerializerShallowCopy.StreamSerializer = &cloned
+ }
+ accepts = append(accepts, acceptedSerializerShallowCopy)
+
+ if d.MediaType == runtime.ContentTypeJSON {
+ legacySerializer = d.Serializer
}
}
if legacySerializer == nil {
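
The serializerExtensions hook is gone; additional wire formats are now registered through the new WithSerializer option when the codec factory is built. A minimal sketch of a caller, registering a second YAML serializer under an illustrative text/yaml media type (the media type, package name, and function name below are examples, not part of this change):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

// newCodecFactoryWithTextYAML builds a codec factory that negotiates one extra
// media type on top of the built-in JSON/YAML/protobuf serializers.
func newCodecFactoryWithTextYAML(scheme *runtime.Scheme) serializer.CodecFactory {
	return serializer.NewCodecFactory(scheme, serializer.WithSerializer(
		func(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo {
			s := json.NewSerializerWithOptions(json.DefaultMetaFactory, creater, typer,
				json.SerializerOptions{Yaml: true})
			return runtime.SerializerInfo{
				MediaType:        "text/yaml", // illustrative only
				MediaTypeType:    "text",
				MediaTypeSubType: "yaml",
				EncodesAsText:    true,
				Serializer:       s,
				StrictSerializer: s,
			}
		},
	))
}
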
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index 1680c149f..ca7b7cc2d 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -43,10 +43,11 @@ type TypeMeta struct {
}
const (
- ContentTypeJSON string = "application/json"
- ContentTypeYAML string = "application/yaml"
- ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
- ContentTypeCBOR string = "application/cbor"
+ ContentTypeJSON string = "application/json"
+ ContentTypeYAML string = "application/yaml"
+ ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+ ContentTypeCBOR string = "application/cbor" // RFC 8949
+ ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742
)
// RawExtension is used to hold extensions in external versions.
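
These media-type constants are what clients pass for content negotiation. A small sketch of the usual consumer (protobuf shown; actually requesting CBOR or CBOR sequences additionally depends on the CBOR feature gates, which this bump does not enable):

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
)

// preferProtobuf configures a rest.Config to send protobuf and accept JSON as a fallback.
func preferProtobuf(cfg *rest.Config) {
	cfg.AcceptContentTypes = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON
	cfg.ContentType = runtime.ContentTypeProtobuf
}
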
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
index fe8ecaaff..d338cf213 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -25,5 +25,7 @@ const (
JSONPatchType PatchType = "application/json-patch+json"
MergePatchType PatchType = "application/merge-patch+json"
StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
- ApplyPatchType PatchType = "application/apply-patch+yaml"
+ ApplyPatchType PatchType = ApplyYAMLPatchType
+ ApplyYAMLPatchType PatchType = "application/apply-patch+yaml"
+ ApplyCBORPatchType PatchType = "application/apply-patch+cbor"
)
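
ApplyPatchType keeps its YAML value, so existing server-side apply callers compile and behave as before; ApplyCBORPatchType only matters for clients that negotiate CBOR. A sketch of the usual call site, with an illustrative field manager name:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// applyConfigMap issues a server-side apply PATCH using the (YAML) apply patch type.
func applyConfigMap(ctx context.Context, cs kubernetes.Interface, ns string, yamlBody []byte) error {
	force := true
	_, err := cs.CoreV1().ConfigMaps(ns).Patch(ctx, "example", types.ApplyPatchType, yamlBody,
		metav1.PatchOptions{FieldManager: "example-manager", Force: &force})
	return err
}
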
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
index 978ffb3c3..de540c82f 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
@@ -19,11 +19,12 @@ package managedfields
import (
"fmt"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)
// FieldManager updates the managed fields and merges applied
@@ -32,7 +33,7 @@ type FieldManager = internal.FieldManager
// NewDefaultFieldManager creates a new FieldManager that merges apply requests
// and update managed fields for other types of requests.
-func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
+func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (*FieldManager, error) {
f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
@@ -43,7 +44,7 @@ func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime
// NewDefaultCRDFieldManager creates a new FieldManager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
-func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ *FieldManager, err error) {
f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
if err != nil {
return nil, fmt.Errorf("failed to create field manager: %v", err)
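
NewDefaultFieldManager and NewDefaultCRDFieldManager now take reset fields as fieldpath.Filter values rather than bare sets. A hedged sketch of adapting a set-based caller, assuming the NewExcludeFilterSetMap helper that structured-merge-diff ships alongside the Filter type; if that helper is absent in a given version, an equivalent exclude filter can be written by hand:

package example

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// toResetFilters converts legacy reset-field sets into the filter map the new
// field manager constructors expect. NewExcludeFilterSetMap is assumed to be
// available in the vendored structured-merge-diff version; the semantics are
// exclude-set filtering, matching the old IgnoredFields behavior.
func toResetFilters(resetFields map[fieldpath.APIVersion]*fieldpath.Set) map[fieldpath.APIVersion]fieldpath.Filter {
	return fieldpath.NewExcludeFilterSetMap(resetFields)
}
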
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
index 786ad991c..3fe36edc9 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
@@ -19,13 +19,14 @@ package internal
import (
"fmt"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+ "sigs.k8s.io/structured-merge-diff/v4/merge"
+ "sigs.k8s.io/structured-merge-diff/v4/typed"
+
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
- "sigs.k8s.io/structured-merge-diff/v4/merge"
- "sigs.k8s.io/structured-merge-diff/v4/typed"
)
type structuredMergeManager struct {
@@ -41,7 +42,7 @@ var _ Manager = &structuredMergeManager{}
// NewStructuredMergeManager creates a new Manager that merges apply requests
// and update managed fields for other types of requests.
-func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) {
+func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (Manager, error) {
if typeConverter == nil {
return nil, fmt.Errorf("typeconverter must not be nil")
}
@@ -52,8 +53,8 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
- Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
- IgnoredFields: resetFields,
+ Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
+ IgnoreFilter: resetFields,
},
}, nil
}
@@ -61,7 +62,7 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
// NewCRDStructuredMergeManager creates a new Manager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
-func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) {
+func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ Manager, err error) {
return &structuredMergeManager{
typeConverter: typeConverter,
objectConverter: objectConverter,
@@ -69,8 +70,8 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r
groupVersion: gv,
hubVersion: hub,
updater: merge.Updater{
- Converter: newCRDVersionConverter(typeConverter, objectConverter, hub),
- IgnoredFields: resetFields,
+ Converter: newCRDVersionConverter(typeConverter, objectConverter, hub),
+ IgnoreFilter: resetFields,
},
}, nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 4fe0c5eb2..df374949d 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -45,7 +45,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic}
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
//
-// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
+// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
@@ -146,7 +146,7 @@ type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues
// is preferable to logging the error - the default behavior is to log but the
// errors may be sent to a remote server for analysis.
//
-// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
+// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
func HandleError(err error) {
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
if err == nil {
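
The reworded comments point callers at the context-aware variants. A small sketch of the preferred pattern when a context is available:

package example

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// runJobs recovers panics and reports errors through the contextual helpers so
// they are routed to the configured contextual logger.
func runJobs(ctx context.Context, jobs <-chan func() error) {
	defer utilruntime.HandleCrashWithContext(ctx)
	for job := range jobs {
		if err := job(); err != nil {
			utilruntime.HandleErrorWithContext(ctx, err, "job failed")
		}
	}
}
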
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index bc387d011..f1634bc0d 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -220,26 +220,24 @@ func Forbidden(field *Path, detail string) *Error {
return &Error{ErrorTypeForbidden, field.String(), "", detail}
}
-// TooLong returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value.
+// TooLong returns a *Error indicating "too long". This is used to report that
+// the given value is too long. This is similar to Invalid, but the returned
+// error will not include the too-long value. If maxLength is negative, it will
+// be included in the message. The value argument is not used.
func TooLong(field *Path, value interface{}, maxLength int) *Error {
- return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
-}
-
-// TooLongMaxLength returns a *Error indicating "too long". This is used to
-// report that the given value is too long. This is similar to
-// Invalid, but the returned error will not include the too-long
-// value. If maxLength is negative, no max length will be included in the message.
-func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
var msg string
if maxLength >= 0 {
- msg = fmt.Sprintf("may not be longer than %d", maxLength)
+ msg = fmt.Sprintf("may not be more than %d bytes", maxLength)
} else {
msg = "value is too long"
}
- return &Error{ErrorTypeTooLong, field.String(), value, msg}
+ return &Error{ErrorTypeTooLong, field.String(), "", msg}
+}
+
+// TooLongMaxLength returns a *Error indicating "too long".
+// Deprecated: Use TooLong instead.
+func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
+ return TooLong(field, "", maxLength)
}
// TooMany returns a *Error indicating "too many". This is used to
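
With TooLongMaxLength deprecated, call sites use TooLong directly; the value argument is ignored, and a negative maxLength yields a message without the limit. A small sketch:

package example

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateShortName returns a TooLong error when name exceeds max bytes.
func validateShortName(path *field.Path, name string, max int) field.ErrorList {
	var errs field.ErrorList
	if len(name) > max {
		errs = append(errs, field.TooLong(path, nil, max))
	}
	return errs
}
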
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index b32644902..9bc393cf5 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -175,6 +175,8 @@ func IsValidLabelValue(value string) []string {
}
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
@@ -204,10 +206,14 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*"
+const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character"
+
// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253
var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$")
// IsDNS1123Subdomain tests for a string that conforms to the definition of a
// subdomain in DNS (RFC 1123).
@@ -222,6 +228,19 @@ func IsDNS1123Subdomain(value string) []string {
return errs
}
+// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string
+func IsDNS1123SubdomainWithUnderscore(value string) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) {
+ errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com"))
+ }
+ return errs
+}
+
const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
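
A usage sketch of the new underscore-tolerant validator; the wrapper function and error wording are illustrative:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

// checkSearchDomain accepts RFC 1123 subdomains that may also contain underscores.
func checkSearchDomain(domain string) error {
	if msgs := validation.IsDNS1123SubdomainWithUnderscore(domain); len(msgs) > 0 {
		return fmt.Errorf("invalid search domain %q: %v", domain, msgs)
	}
	return nil
}
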
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
index e8e371d7d..e840fe9eb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
// with apply.
type MatchResourcesApplyConfiguration struct {
- NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
- MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
}
// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
@@ -42,7 +42,7 @@ func MatchResources() *MatchResourcesApplyConfiguration {
// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamespaceSelector field is set to the value of the last call.
-func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
+func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
b.NamespaceSelector = value
return b
}
@@ -50,7 +50,7 @@ func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.Label
// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ObjectSelector field is set to the value of the last call.
-func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
+func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
b.ObjectSelector = value
return b
}
@@ -84,7 +84,7 @@ func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*N
// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MatchPolicy field is set to the value of the last call.
-func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value apiadmissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration {
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration {
b.MatchPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
index 58b71d6d5..4267f5fbf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use
// with apply.
type MutatingWebhookConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with
@@ -56,18 +56,18 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "")
}
// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status")
}
-func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
b := &MutatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmiss
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string)
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st
// If called multiple times, the Name field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string)
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str
// If called multiple times, the UID field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val
// If called multiple times, the Generation field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
+func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
+func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod
// overwriting an existing map entries in Labels field with the same key.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[
// overwriting an existing map entries in Annotations field with the same key.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration {
+func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val
func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *MutatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -254,5 +254,5 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ...
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
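
The applyconfigurations churn in this and the following files is generator output (renamed import aliases and explicit embedded-field selectors); the public builder surface is unchanged. A typical, illustrative call site for the generated builder:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// applyWebhookConfig server-side applies a MutatingWebhookConfiguration built
// with the generated apply-configuration builder.
func applyWebhookConfig(ctx context.Context, cs kubernetes.Interface) error {
	cfg := admissionv1ac.MutatingWebhookConfiguration("example-webhook").
		WithLabels(map[string]string{"app": "example"})
	_, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
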
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
index eda3bf635..dd31981ad 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
@@ -50,7 +50,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...
// If called multiple times, values provided by each call will be appended to the Operations field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Operations = append(b.Operations, values[i])
+ b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i])
}
return b
}
@@ -60,7 +60,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm
// If called multiple times, values provided by each call will be appended to the APIGroups field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIGroups = append(b.APIGroups, values[i])
+ b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i])
}
return b
}
@@ -70,7 +70,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri
// If called multiple times, values provided by each call will be appended to the APIVersions field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIVersions = append(b.APIVersions, values[i])
+ b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i])
}
return b
}
@@ -80,7 +80,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st
// If called multiple times, values provided by each call will be appended to the Resources field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Resources = append(b.Resources, values[i])
+ b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i])
}
return b
}
@@ -89,6 +89,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scope field is set to the value of the last call.
func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration {
- b.Scope = &value
+ b.RuleApplyConfiguration.Scope = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
index 73cda9b04..140233f6b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
@@ -20,7 +20,7 @@ package v1
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
@@ -28,7 +28,7 @@ import (
type ParamRefApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Namespace *string `json:"namespace,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
@@ -57,7 +57,7 @@ func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyC
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration {
+func (b *ParamRefApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
index 36a93643c..a8c68136b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)
// RuleApplyConfiguration represents a declarative configuration of the Rule type for use
// with apply.
type RuleApplyConfiguration struct {
- APIGroups []string `json:"apiGroups,omitempty"`
- APIVersions []string `json:"apiVersions,omitempty"`
- Resources []string `json:"resources,omitempty"`
- Scope *v1.ScopeType `json:"scope,omitempty"`
+ APIGroups []string `json:"apiGroups,omitempty"`
+ APIVersions []string `json:"apiVersions,omitempty"`
+ Resources []string `json:"resources,omitempty"`
+ Scope *admissionregistrationv1.ScopeType `json:"scope,omitempty"`
}
// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with
@@ -70,7 +70,7 @@ func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfi
// WithScope sets the Scope field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scope field is set to the value of the last call.
-func (b *RuleApplyConfiguration) WithScope(value v1.ScopeType) *RuleApplyConfiguration {
+func (b *RuleApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleApplyConfiguration {
b.Scope = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
index 92bddd502..55a985f99 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)
// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use
// with apply.
type RuleWithOperationsApplyConfiguration struct {
- Operations []v1.OperationType `json:"operations,omitempty"`
+ Operations []admissionregistrationv1.OperationType `json:"operations,omitempty"`
RuleApplyConfiguration `json:",inline"`
}
@@ -38,7 +38,7 @@ func RuleWithOperations() *RuleWithOperationsApplyConfiguration {
// WithOperations adds the given value to the Operations field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Operations field.
-func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration {
+func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *RuleWithOperationsApplyConfiguration {
for i := range values {
b.Operations = append(b.Operations, values[i])
}
@@ -50,7 +50,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.Opera
// If called multiple times, values provided by each call will be appended to the APIGroups field.
func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIGroups = append(b.APIGroups, values[i])
+ b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i])
}
return b
}
@@ -60,7 +60,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *
// If called multiple times, values provided by each call will be appended to the APIVersions field.
func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIVersions = append(b.APIVersions, values[i])
+ b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i])
}
return b
}
@@ -70,7 +70,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string)
// If called multiple times, values provided by each call will be appended to the Resources field.
func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration {
for i := range values {
- b.Resources = append(b.Resources, values[i])
+ b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i])
}
return b
}
@@ -78,7 +78,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *
// WithScope sets the Scope field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scope field is set to the value of the last call.
-func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration {
- b.Scope = &value
+func (b *RuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleWithOperationsApplyConfiguration {
+ b.RuleApplyConfiguration.Scope = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
index 841209cae..730de0369 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
// with apply.
type ValidatingAdmissionPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
}
// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
@@ -57,18 +57,18 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "")
}
// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status")
}
-func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
b := &ValidatingAdmissionPolicyApplyConfiguration{}
err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionreg
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries ma
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration {
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
index 1acad056f..2921a711f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
// with apply.
type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
@@ -56,18 +56,18 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "")
}
// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status")
}
-func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(ent
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
index 0d1a6c81a..a7bebb59f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use
// with apply.
type ValidatingWebhookConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with
@@ -56,18 +56,18 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "")
}
// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status")
}
-func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
b := &ValidatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiad
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
+func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
+func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entri
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration {
+func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ValidatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -254,5 +254,5 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values .
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
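
The Extract* helpers above support the extract/modify-in-place/apply workflow their comments describe. A rough sketch of that round trip, assuming a configured clientset and that "npd-manager" and "demo-webhook" are purely illustrative values (not anything from this PR):

package applyexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelWebhook extracts the fields owned by one field manager, modifies them
// in place, and applies them back with the same manager.
func relabelWebhook(ctx context.Context, clientset kubernetes.Interface) error {
	live, err := clientset.AdmissionregistrationV1().
		ValidatingWebhookConfigurations().
		Get(ctx, "demo-webhook", metav1.GetOptions{})
	if err != nil {
		return err
	}

	ac, err := admissionregistrationv1ac.ExtractValidatingWebhookConfiguration(live, "npd-manager")
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"managed-by": "npd"})

	_, err = clientset.AdmissionregistrationV1().
		ValidatingWebhookConfigurations().
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "npd-manager"})
	return err
}
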
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
index 2a828b6b4..9966a7a28 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
// with apply.
type ValidationApplyConfiguration struct {
- Expression *string `json:"expression,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *v1.StatusReason `json:"reason,omitempty"`
- MessageExpression *string `json:"messageExpression,omitempty"`
+ Expression *string `json:"expression,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Reason *metav1.StatusReason `json:"reason,omitempty"`
+ MessageExpression *string `json:"messageExpression,omitempty"`
}
// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
@@ -56,7 +56,7 @@ func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationAppl
// WithReason sets the Reason field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Reason field is set to the value of the last call.
-func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration {
+func (b *ValidationApplyConfiguration) WithReason(value metav1.StatusReason) *ValidationApplyConfiguration {
b.Reason = &value
return b
}
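
With the import rename above, the StatusReason passed to WithReason comes from the apimachinery metav1 package. A small sketch of building one Validation entry, assuming the generated Validation() constructor and WithExpression setter that follow the same pattern as the setters shown here (the CEL expression and message are illustrative):

package applyexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// replicaLimitValidation builds a single Validation for use in a
// ValidatingAdmissionPolicy spec.
func replicaLimitValidation() *admissionregistrationv1ac.ValidationApplyConfiguration {
	return admissionregistrationv1ac.Validation().
		WithExpression("object.spec.replicas <= 5").
		WithMessage("replicas must be no more than 5").
		WithReason(metav1.StatusReasonInvalid)
}
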
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go
new file mode 100644
index 000000000..b08ac7224
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// ApplyConfigurationApplyConfiguration represents a declarative configuration of the ApplyConfiguration type for use
+// with apply.
+type ApplyConfigurationApplyConfiguration struct {
+ Expression *string `json:"expression,omitempty"`
+}
+
+// ApplyConfigurationApplyConfiguration constructs a declarative configuration of the ApplyConfiguration type for use with
+// apply.
+func ApplyConfiguration() *ApplyConfigurationApplyConfiguration {
+ return &ApplyConfigurationApplyConfiguration{}
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *ApplyConfigurationApplyConfiguration) WithExpression(value string) *ApplyConfigurationApplyConfiguration {
+ b.Expression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go
new file mode 100644
index 000000000..418d86a2b
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// JSONPatchApplyConfiguration represents a declarative configuration of the JSONPatch type for use
+// with apply.
+type JSONPatchApplyConfiguration struct {
+ Expression *string `json:"expression,omitempty"`
+}
+
+// JSONPatchApplyConfiguration constructs a declarative configuration of the JSONPatch type for use with
+// apply.
+func JSONPatch() *JSONPatchApplyConfiguration {
+ return &JSONPatchApplyConfiguration{}
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *JSONPatchApplyConfiguration) WithExpression(value string) *JSONPatchApplyConfiguration {
+ b.Expression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
new file mode 100644
index 000000000..d66071c18
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
@@ -0,0 +1,253 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// MutatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicy type for use
+// with apply.
+type MutatingAdmissionPolicyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// MutatingAdmissionPolicy constructs a declarative configuration of the MutatingAdmissionPolicy type for use with
+// apply.
+func MutatingAdmissionPolicy(name string) *MutatingAdmissionPolicyApplyConfiguration {
+ b := &MutatingAdmissionPolicyApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("MutatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
+ return b
+}
+
+// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a
+// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// mutatingAdmissionPolicy must be a unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+ return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "")
+}
+
+// ExtractMutatingAdmissionPolicyStatus is the same as ExtractMutatingAdmissionPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractMutatingAdmissionPolicyStatus(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+ return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "status")
+}
+
+func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+ b := &MutatingAdmissionPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(mutatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(mutatingAdmissionPolicy.Name)
+
+ b.WithKind("MutatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *MutatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithSpec(value *MutatingAdmissionPolicySpecApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *MutatingAdmissionPolicyApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
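
The new v1alpha1 file above mirrors the v1 builders. A minimal sketch using only the constructors and setters defined in it (policy name, label, and finalizer are illustrative):

package applyexample

import (
	admissionregistrationv1alpha1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// demoMutatingPolicy chains the generated builders for the new
// MutatingAdmissionPolicy apply configuration.
func demoMutatingPolicy() *admissionregistrationv1alpha1ac.MutatingAdmissionPolicyApplyConfiguration {
	return admissionregistrationv1alpha1ac.MutatingAdmissionPolicy("demo-mutating-policy").
		WithLabels(map[string]string{"app": "node-problem-detector"}).
		WithFinalizers("example.com/cleanup")
}
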
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
new file mode 100644
index 000000000..7cccd291b
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
@@ -0,0 +1,253 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// MutatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBinding type for use
+// with apply.
+type MutatingAdmissionPolicyBindingApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// MutatingAdmissionPolicyBinding constructs a declarative configuration of the MutatingAdmissionPolicyBinding type for use with
+// apply.
+func MutatingAdmissionPolicyBinding(name string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b := &MutatingAdmissionPolicyBindingApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("MutatingAdmissionPolicyBinding")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
+ return b
+}
+
+// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a
+// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// mutatingAdmissionPolicyBinding must be a unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "")
+}
+
+// ExtractMutatingAdmissionPolicyBindingStatus is the same as ExtractMutatingAdmissionPolicyBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractMutatingAdmissionPolicyBindingStatus(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "status")
+}
+
+func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+ b := &MutatingAdmissionPolicyBindingApplyConfiguration{}
+ err := managedfields.ExtractInto(mutatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(mutatingAdmissionPolicyBinding.Name)
+
+ b.WithKind("MutatingAdmissionPolicyBinding")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *MutatingAdmissionPolicyBindingSpecApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go
new file mode 100644
index 000000000..04729f42b
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go
@@ -0,0 +1,57 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// MutatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use
+// with apply.
+type MutatingAdmissionPolicyBindingSpecApplyConfiguration struct {
+ PolicyName *string `json:"policyName,omitempty"`
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+}
+
+// MutatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use with
+// apply.
+func MutatingAdmissionPolicyBindingSpec() *MutatingAdmissionPolicyBindingSpecApplyConfiguration {
+ return &MutatingAdmissionPolicyBindingSpecApplyConfiguration{}
+}
+
+// WithPolicyName sets the PolicyName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PolicyName field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *MutatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.PolicyName = &value
+ return b
+}
+
+// WithParamRef sets the ParamRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParamRef field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.ParamRef = value
+ return b
+}
+
+// WithMatchResources sets the MatchResources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchResources field is set to the value of the last call.
+func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.MatchResources = value
+ return b
+}
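
The two new files above give MutatingAdmissionPolicyBinding the same builder surface as the other admissionregistration resources. As a rough sketch of how they are meant to be used with server-side apply (assuming the package-level MutatingAdmissionPolicyBinding(name) constructor generated earlier in the same file and an AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings() typed client in this client-go version, both outside this excerpt; the field-manager name is illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyExampleBinding builds a MutatingAdmissionPolicyBinding declaratively and
// submits it with server-side apply. Only fields set through "With" calls end up
// in the apply patch, so everything left unset stays under other managers' control.
func applyExampleBinding(ctx context.Context, client kubernetes.Interface) error {
	binding := admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding("example-binding").
		WithLabels(map[string]string{"app": "example"}).
		WithSpec(admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec().
			WithPolicyName("example-policy"))

	_, err := client.AdmissionregistrationV1alpha1().
		MutatingAdmissionPolicyBindings().
		Apply(ctx, binding, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
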
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go
new file mode 100644
index 000000000..334056a37
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go
@@ -0,0 +1,113 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+)
+
+// MutatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicySpec type for use
+// with apply.
+type MutatingAdmissionPolicySpecApplyConfiguration struct {
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ Mutations []MutationApplyConfiguration `json:"mutations,omitempty"`
+ FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+}
+
+// MutatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicySpec type for use with
+// apply.
+func MutatingAdmissionPolicySpec() *MutatingAdmissionPolicySpecApplyConfiguration {
+ return &MutatingAdmissionPolicySpecApplyConfiguration{}
+}
+
+// WithParamKind sets the ParamKind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParamKind field is set to the value of the last call.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration {
+ b.ParamKind = value
+ return b
+}
+
+// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchConstraints field is set to the value of the last call.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration {
+ b.MatchConstraints = value
+ return b
+}
+
+// WithVariables adds the given value to the Variables field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Variables field.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithVariables")
+ }
+ b.Variables = append(b.Variables, *values[i])
+ }
+ return b
+}
+
+// WithMutations adds the given value to the Mutations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mutations field.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMutations(values ...*MutationApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithMutations")
+ }
+ b.Mutations = append(b.Mutations, *values[i])
+ }
+ return b
+}
+
+// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FailurePolicy field is set to the value of the last call.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *MutatingAdmissionPolicySpecApplyConfiguration {
+ b.FailurePolicy = &value
+ return b
+}
+
+// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchConditions field.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithMatchConditions")
+ }
+ b.MatchConditions = append(b.MatchConditions, *values[i])
+ }
+ return b
+}
+
+// WithReinvocationPolicy sets the ReinvocationPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ReinvocationPolicy field is set to the value of the last call.
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithReinvocationPolicy(value v1.ReinvocationPolicyType) *MutatingAdmissionPolicySpecApplyConfiguration {
+ b.ReinvocationPolicy = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go
new file mode 100644
index 000000000..4ed9d93fd
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go
@@ -0,0 +1,61 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+)
+
+// MutationApplyConfiguration represents a declarative configuration of the Mutation type for use
+// with apply.
+type MutationApplyConfiguration struct {
+ PatchType *admissionregistrationv1alpha1.PatchType `json:"patchType,omitempty"`
+ ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"`
+ JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"`
+}
+
+// MutationApplyConfiguration constructs a declarative configuration of the Mutation type for use with
+// apply.
+func Mutation() *MutationApplyConfiguration {
+ return &MutationApplyConfiguration{}
+}
+
+// WithPatchType sets the PatchType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PatchType field is set to the value of the last call.
+func (b *MutationApplyConfiguration) WithPatchType(value admissionregistrationv1alpha1.PatchType) *MutationApplyConfiguration {
+ b.PatchType = &value
+ return b
+}
+
+// WithApplyConfiguration sets the ApplyConfiguration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ApplyConfiguration field is set to the value of the last call.
+func (b *MutationApplyConfiguration) WithApplyConfiguration(value *ApplyConfigurationApplyConfiguration) *MutationApplyConfiguration {
+ b.ApplyConfiguration = value
+ return b
+}
+
+// WithJSONPatch sets the JSONPatch field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the JSONPatch field is set to the value of the last call.
+func (b *MutationApplyConfiguration) WithJSONPatch(value *JSONPatchApplyConfiguration) *MutationApplyConfiguration {
+ b.JSONPatch = value
+ return b
+}
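
The MutatingAdmissionPolicySpec and Mutation builders above compose the policy side of the same feature. A minimal sketch, assuming the ApplyConfiguration apply configuration generated earlier in this package exposes a WithExpression setter and that Fail, PatchTypeApplyConfiguration, and NeverReinvocationPolicy are the constant names in the corresponding k8s.io/api/admissionregistration packages; the CEL expression is only an example:

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	apiv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	applyv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// examplePolicySpec assembles a MutatingAdmissionPolicySpec with a single
// ApplyConfiguration-style mutation expressed in CEL.
func examplePolicySpec() *applyv1alpha1.MutatingAdmissionPolicySpecApplyConfiguration {
	return applyv1alpha1.MutatingAdmissionPolicySpec().
		WithFailurePolicy(apiv1alpha1.Fail).
		WithReinvocationPolicy(admissionregistrationv1.NeverReinvocationPolicy).
		WithMutations(applyv1alpha1.Mutation().
			WithPatchType(apiv1alpha1.PatchTypeApplyConfiguration).
			WithApplyConfiguration(applyv1alpha1.ApplyConfiguration().
				WithExpression(`Object{metadata: Object.metadata{labels: {"mutated": "true"}}}`)))
}
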
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
index 5e6744fd7..f630224ac 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
@@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...
// If called multiple times, values provided by each call will be appended to the Operations field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Operations = append(b.Operations, values[i])
+ b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i])
}
return b
}
@@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm
// If called multiple times, values provided by each call will be appended to the APIGroups field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIGroups = append(b.APIGroups, values[i])
+ b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i])
}
return b
}
@@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri
// If called multiple times, values provided by each call will be appended to the APIVersions field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIVersions = append(b.APIVersions, values[i])
+ b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i])
}
return b
}
@@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st
// If called multiple times, values provided by each call will be appended to the Resources field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Resources = append(b.Resources, values[i])
+ b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i])
}
return b
}
@@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scope field is set to the value of the last call.
func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration {
- b.Scope = &value
+ b.RuleApplyConfiguration.Scope = &value
return b
}
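
The NamedRuleWithOperations changes above only qualify the field access through the embedded RuleWithOperationsApplyConfiguration and RuleApplyConfiguration types; the chained call pattern for callers is unchanged. A small sketch, assuming the NamedRuleWithOperations() constructor from this package (outside this excerpt) and the OperationType/ScopeType constants from k8s.io/api/admissionregistration/v1:

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// podRule matches CREATE and UPDATE of core/v1 Pods in any scope.
func podRule() *applyv1alpha1.NamedRuleWithOperationsApplyConfiguration {
	return applyv1alpha1.NamedRuleWithOperations().
		WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
		WithAPIGroups("").
		WithAPIVersions("v1").
		WithResources("pods").
		WithScope(admissionregistrationv1.AllScopes)
}
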
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
index c4fff1d47..669fadbd4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1alpha1
import (
- v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
// with apply.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ ParameterNotFoundAction *admissionregistrationv1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with
@@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo
// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call.
-func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
+func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
b.ParameterNotFoundAction = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
index fe60eb5f2..7fd1c0651 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
@@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
index 0c11ee594..ca8ac7dd0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
@@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 51bb82389..15c54c125 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admission
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string)
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st
// If called multiple times, the Name field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string)
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str
// If called multiple times, the UID field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val
// If called multiple times, the Generation field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value in
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(v
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod
// overwriting an existing map entries in Labels field with the same key.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[
// overwriting an existing map entries in Annotations field with the same key.
func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val
func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -254,5 +254,5 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ...
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
index 5de70c7ad..62c617d2f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
@@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...
// If called multiple times, values provided by each call will be appended to the Operations field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Operations = append(b.Operations, values[i])
+ b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i])
}
return b
}
@@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm
// If called multiple times, values provided by each call will be appended to the APIGroups field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIGroups = append(b.APIGroups, values[i])
+ b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i])
}
return b
}
@@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri
// If called multiple times, values provided by each call will be appended to the APIVersions field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.APIVersions = append(b.APIVersions, values[i])
+ b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i])
}
return b
}
@@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st
// If called multiple times, values provided by each call will be appended to the Resources field.
func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration {
for i := range values {
- b.Resources = append(b.Resources, values[i])
+ b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i])
}
return b
}
@@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scope field is set to the value of the last call.
func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration {
- b.Scope = &value
+ b.RuleApplyConfiguration.Scope = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
index 0a94ae067..5143b0cb9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
// with apply.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ ParameterNotFoundAction *admissionregistrationv1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with
@@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo
// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call.
-func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
+func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
b.ParameterNotFoundAction = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
index c29ee56cb..35a8adbf7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
index 4347c4810..191d045ef 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index c3535c180..e775e55a3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admis
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value
// If called multiple times, the Name field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s
// If called multiple times, the UID field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri
// overwriting an existing map entries in Labels field with the same key.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma
// overwriting an existing map entries in Annotations field with the same key.
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -254,5 +254,5 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values .
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
index d734328b0..0061d8afb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
- v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
+ apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -32,8 +32,8 @@ import (
type StorageVersionApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *v1alpha1.StorageVersionSpec `json:"spec,omitempty"`
- Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"`
+ Spec *apiserverinternalv1alpha1.StorageVersionSpec `json:"spec,omitempty"`
+ Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"`
}
// StorageVersion constructs a declarative configuration of the StorageVersion type for use with
@@ -57,18 +57,18 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
+func ExtractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
return extractStorageVersion(storageVersion, fieldManager, "")
}
// ExtractStorageVersionStatus is the same as ExtractStorageVersion except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractStorageVersionStatus(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
+func ExtractStorageVersionStatus(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
return extractStorageVersion(storageVersion, fieldManager, "status")
}
-func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) {
+func extractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) {
b := &StorageVersionApplyConfiguration{}
err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *StorageVersionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *Storage
// If called multiple times, the Name field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *Stora
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageV
// If called multiple times, the UID field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVers
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *St
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageV
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Ti
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Ti
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *StorageVersionApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own
func (b *StorageVersionApplyConfiguration) WithFinalizers(values ...string) *StorageVersionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -242,7 +242,7 @@ func (b *StorageVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExi
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
-func (b *StorageVersionApplyConfiguration) WithSpec(value v1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration {
+func (b *StorageVersionApplyConfiguration) WithSpec(value apiserverinternalv1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration {
b.Spec = &value
return b
}
@@ -258,5 +258,5 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StorageVersionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
index 68d894d0c..1ed71cf8e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1alpha1
import (
- v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
+ apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use
// with apply.
type StorageVersionConditionApplyConfiguration struct {
- Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"`
- Status *v1alpha1.ConditionStatus `json:"status,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *apiserverinternalv1alpha1.StorageVersionConditionType `json:"type,omitempty"`
+ Status *apiserverinternalv1alpha1.ConditionStatus `json:"status,omitempty"`
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with
@@ -43,7 +43,7 @@ func StorageVersionCondition() *StorageVersionConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration {
+func (b *StorageVersionConditionApplyConfiguration) WithType(value apiserverinternalv1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -51,7 +51,7 @@ func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.Stor
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *StorageVersionConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration {
+func (b *StorageVersionConditionApplyConfiguration) WithStatus(value apiserverinternalv1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
index 25b645059..bfdad4a73 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
@@ -20,21 +20,21 @@ package v1
import (
appsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
// with apply.
type ControllerRevisionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
- Revision *int64 `json:"revision,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ Revision *int64 `json:"revision,omitempty"`
}
// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
@@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con
// If called multiple times, the Name field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont
// If called multiple times, the UID field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,25 +150,25 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
+func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
+func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -214,13 +214,13 @@ func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration {
+func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,14 +231,14 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ControllerRevisionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
index a15785651..47883d043 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiappsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ appsv1 "k8s.io/api/apps/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
// with apply.
type DaemonSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
}
// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
@@ -58,18 +58,18 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+func ExtractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
return extractDaemonSet(daemonSet, fieldManager, "")
}
// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractDaemonSetStatus(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+func ExtractDaemonSetStatus(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
return extractDaemonSet(daemonSet, fieldManager, "status")
}
-func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
+func extractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
b := &DaemonSetApplyConfiguration{}
err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subre
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
+func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
+func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae
// overwriting an existing map entries in Annotations field with the same key.
func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration {
+func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DaemonSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
index de91745b8..8c56e4994 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
type DaemonSetConditionApplyConfiguration struct {
- Type *v1.DaemonSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1.DaemonSetConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
@@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetConditionApplyConfiguration) WithType(value v1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
+func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
index 99dc5abae..d2382b80e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
@@ -20,13 +20,13 @@ package v1
import (
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
// with apply.
type DaemonSetSpecApplyConfiguration struct {
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
@@ -42,7 +42,7 @@ func DaemonSetSpec() *DaemonSetSpecApplyConfiguration {
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration {
+func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
index 15af4e66b..993e1bd57 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
)
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ Type *appsv1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
+func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
index 52b7a21b7..485357c00 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiappsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ appsv1 "k8s.io/api/apps/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
// with apply.
type DeploymentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
}
// Deployment constructs a declarative configuration of the Deployment type for use with
@@ -58,18 +58,18 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractDeployment(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+func ExtractDeployment(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
return extractDeployment(deployment, fieldManager, "")
}
// ExtractDeploymentStatus is the same as ExtractDeployment except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractDeploymentStatus(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+func ExtractDeploymentStatus(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
return extractDeployment(deployment, fieldManager, "status")
}
-func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
+func extractDeployment(deployment *appsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
b := &DeploymentApplyConfiguration{}
err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, su
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA
// If called multiple times, the Name field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
+func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
+func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De
// overwriting an existing map entries in Annotations field with the same key.
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration {
+func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DeploymentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
index 84df752bc..3a6693637 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
type DeploymentConditionApplyConfiguration struct {
- Type *v1.DeploymentConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1.DeploymentConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
@@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentConditionApplyConfiguration) WithType(value v1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
+func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
index 063f1c276..5f34b0582 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
@@ -20,14 +20,14 @@ package v1
import (
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
// with apply.
type DeploymentSpecApplyConfiguration struct {
Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
@@ -53,7 +53,7 @@ func (b *DeploymentSpecApplyConfiguration) WithReplicas(value int32) *Deployment
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *DeploymentSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration {
+func (b *DeploymentSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
index dc4b97c55..7bf8a1595 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
)
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
type DeploymentStrategyApplyConfiguration struct {
- Type *v1.DeploymentStrategyType `json:"type,omitempty"`
+ Type *appsv1.DeploymentStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
+func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
index 35ca4e4df..6e9c0e14f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiappsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ appsv1 "k8s.io/api/apps/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
// with apply.
type ReplicaSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
@@ -58,18 +58,18 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+func ExtractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
return extractReplicaSet(replicaSet, fieldManager, "")
}
// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractReplicaSetStatus(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+func ExtractReplicaSetStatus(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
return extractReplicaSet(replicaSet, fieldManager, "status")
}
-func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
+func extractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
b := &ReplicaSetApplyConfiguration{}
err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, su
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA
// If called multiple times, the Name field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
+func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
+func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re
// overwriting an existing map entries in Annotations field with the same key.
func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration {
+func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ReplicaSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
index 32da80842..0325ce058 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
type ReplicaSetConditionApplyConfiguration struct {
- Type *v1.ReplicaSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1.ReplicaSetConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
@@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
+func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
index 039058486..714ddcfe3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
@@ -20,7 +20,7 @@ package v1
import (
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
@@ -28,7 +28,7 @@ import (
type ReplicaSetSpecApplyConfiguration struct {
Replicas *int32 `json:"replicas,omitempty"`
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
@@ -57,7 +57,7 @@ func (b *ReplicaSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *Rep
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration {
+func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
index 6f2b340da..cb5306935 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiappsv1 "k8s.io/api/apps/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ appsv1 "k8s.io/api/apps/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
// with apply.
type StatefulSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
}
// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
@@ -58,18 +58,18 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+func ExtractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
return extractStatefulSet(statefulSet, fieldManager, "")
}
// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractStatefulSetStatus(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+func ExtractStatefulSetStatus(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
return extractStatefulSet(statefulSet, fieldManager, "status")
}
-func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
+func extractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
b := &StatefulSetApplyConfiguration{}
err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe
// If called multiple times, the Name field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet
// If called multiple times, the UID field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
+func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
+func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S
// overwriting an existing map entries in Annotations field with the same key.
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration {
+func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StatefulSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
index c62a5e854..45b2ad81f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
type StatefulSetConditionApplyConfiguration struct {
- Type *v1.StatefulSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1.StatefulSetConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
@@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetConditionApplyConfiguration) WithType(value v1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
+func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
index cd65fd436..dff3e2a76 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
)
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
- WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ WhenDeleted *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
+ WhenScaled *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
@@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol
// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenDeleted field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenDeleted = &value
return b
}
@@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With
// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenScaled field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenScaled = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
index 1848a963c..c48b64fe3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
@@ -21,14 +21,14 @@ package v1
import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
// with apply.
type StatefulSetSpecApplyConfiguration struct {
Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
ServiceName *string `json:"serviceName,omitempty"`
@@ -57,7 +57,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithReplicas(value int32) *StatefulS
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration {
+func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
index b59e10735..ae135d34d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
)
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ Type *appsv1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
+func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
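Taken together, the apps/v1 files above are the builders used for server-side apply. A minimal usage sketch, assuming a working rest.Config and illustrative import aliases (appsv1apply, metav1apply); a real StatefulSet apply would also need a pod template in the spec.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1apply "k8s.io/client-go/applyconfigurations/apps/v1"
	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// applyStatefulSet builds a declarative configuration with the generated
// With* helpers and submits it via server-side apply.
func applyStatefulSet(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Chained With* calls set one field each; later calls win, as the
	// generated doc comments state.
	ss := appsv1apply.StatefulSet("web", "default").
		WithLabels(map[string]string{"app": "web"}).
		WithSpec(appsv1apply.StatefulSetSpec().
			WithReplicas(3).
			WithServiceName("web").
			WithSelector(metav1apply.LabelSelector().
				WithMatchLabels(map[string]string{"app": "web"})))
	_, err = clientset.AppsV1().StatefulSets("default").
		Apply(ctx, ss, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}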
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
index 606de58a1..910dd7bec 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
@@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
return extractControllerRevision(controllerRevision, fieldManager, "")
}
// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractControllerRevisionStatus(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevisionStatus(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
return extractControllerRevision(controllerRevision, fieldManager, "status")
}
-func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
+func extractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, f
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con
// If called multiple times, the Name field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont
// If called multiple times, the UID field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ControllerRevisionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
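The b.Kind to b.TypeMetaApplyConfiguration.Kind rewrites in these files do not change behaviour: the apply configurations embed TypeMetaApplyConfiguration and ObjectMetaApplyConfiguration, so the promoted field path and the fully qualified one address the same storage. A stand-alone sketch with simplified, purely illustrative types:

package main

import "fmt"

type TypeMeta struct{ Kind *string }
type ObjectMeta struct{ Name *string }

// Builder mirrors the generated apply-configuration layout: one embedded
// value and one embedded pointer.
type Builder struct {
	TypeMeta
	*ObjectMeta
}

func main() {
	kind, name := "Deployment", "web"
	b := &Builder{ObjectMeta: &ObjectMeta{}}
	b.Kind = &kind          // promoted access (old generated code)
	b.TypeMeta.Kind = &kind // explicit access (new generated code), same field
	b.ObjectMeta.Name = &name
	fmt.Println(*b.Kind, *b.Name)
}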
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
index 145aaed70..057ea5b6f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
@@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA
// If called multiple times, the Name field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De
// overwriting an existing map entries in Annotations field with the same key.
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DeploymentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
index 504dddd94..b0a45b1a6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
type DeploymentConditionApplyConfiguration struct {
- Type *v1beta1.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta1.DeploymentConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
@@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
+func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
index 2c322b4ac..03e66555a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
)
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
type DeploymentStrategyApplyConfiguration struct {
- Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ Type *appsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
+func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
index 270593886..ba8aa3a4c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
@@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe
// If called multiple times, the Name field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet
// If called multiple times, the UID field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S
// overwriting an existing map entries in Annotations field with the same key.
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StatefulSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
index 8a17391cd..5a13584bc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
type StatefulSetConditionApplyConfiguration struct {
- Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta1.StatefulSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
@@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
+func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
index 69a8ee0f0..f9b6fbd88 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
)
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
- WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ WhenDeleted *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
+ WhenScaled *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
@@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol
// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenDeleted field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenDeleted = &value
return b
}
@@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With
// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenScaled field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenScaled = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
index ac325d717..137c7243b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
@@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct {
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
ServiceName *string `json:"serviceName,omitempty"`
- PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+ PodManagementPolicy *appsv1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
@@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State
// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodManagementPolicy field is set to the value of the last call.
-func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration {
+func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration {
b.PodManagementPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
index 7714ebbb7..24154f7af 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
)
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ Type *appsv1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
+func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
index 5f75a4551..6facd5384 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
@@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
return extractControllerRevision(controllerRevision, fieldManager, "")
}
// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractControllerRevisionStatus(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevisionStatus(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
return extractControllerRevision(controllerRevision, fieldManager, "status")
}
-func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
+func extractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, f
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con
// If called multiple times, the Name field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont
// If called multiple times, the UID field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -261,5 +261,5 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ControllerRevisionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
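As the doc comments above state, repeated WithLabels (and WithAnnotations) calls merge their entries into the existing map, overwriting values for duplicate keys. A short sketch against the v1beta2 ControllerRevision builder, using an illustrative import alias (appsv1beta2apply):

package main

import (
	"fmt"

	appsv1beta2apply "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

func main() {
	// The second WithLabels call adds nothing new except "tier", which
	// overwrites the value from the first call, mirroring the merge loop
	// in the generated code above.
	cr := appsv1beta2apply.ControllerRevision("rev-1", "default").
		WithLabels(map[string]string{"app": "web", "tier": "frontend"}).
		WithLabels(map[string]string{"tier": "backend"})
	fmt.Println(cr.ObjectMetaApplyConfiguration.Labels) // map[app:web tier:backend]
}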
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
index 9ffda6182..89a2ebd4b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
@@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, sub
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae
// overwriting an existing map entries in Annotations field with the same key.
func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DaemonSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
index 8315050f0..0aa47cf0a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
type DaemonSetConditionApplyConfiguration struct {
- Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta2.DaemonSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
@@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
+func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
index 7d66f1da4..2cee58cf3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
)
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ Type *appsv1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
+func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
index 485da788a..8948cc606 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
@@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA
// If called multiple times, the Name field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De
// overwriting an existing map entries in Annotations field with the same key.
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DeploymentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
index 192427874..f404dd9df 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
type DeploymentConditionApplyConfiguration struct {
- Type *v1beta2.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta2.DeploymentConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
@@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
+func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
index c769436ee..6347a3a39 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
)
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
type DeploymentStrategyApplyConfiguration struct {
- Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"`
+ Type *appsv1beta2.DeploymentStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
+func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
index d8608aa51..679416b21 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
@@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA
// If called multiple times, the Name field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re
// overwriting an existing map entries in Annotations field with the same key.
func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ReplicaSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
index beec546f7..3d8cd3632 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
type ReplicaSetConditionApplyConfiguration struct {
- Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta2.ReplicaSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
@@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
+func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
index 126ab2d8b..27067b6aa 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
@@ -30,8 +30,8 @@ import (
type ScaleApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *v1beta2.ScaleSpec `json:"spec,omitempty"`
- Status *v1beta2.ScaleStatus `json:"status,omitempty"`
+ Spec *appsv1beta2.ScaleSpec `json:"spec,omitempty"`
+ Status *appsv1beta2.ScaleStatus `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S
// overwriting an existing map entries in Labels field with the same key.
func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithSpec(value appsv1beta2.ScaleSpec) *ScaleApplyConfiguration {
b.Spec = &value
return b
}
@@ -212,7 +212,7 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyC
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithStatus(value appsv1beta2.ScaleStatus) *ScaleApplyConfiguration {
b.Status = &value
return b
}
@@ -220,5 +220,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ScaleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
index 3d2b5d191..933072421 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
@@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe
// If called multiple times, the Name field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet
// If called multiple times, the UID field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S
// overwriting an existing map entries in Annotations field with the same key.
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StatefulSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
index aa45db686..50bef2003 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
type StatefulSetConditionApplyConfiguration struct {
- Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *appsv1beta2.StatefulSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
@@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
+func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
index 318e5f464..d4d139ae3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
)
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
- WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ WhenDeleted *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
+ WhenScaled *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
@@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol
// WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenDeleted field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenDeleted = &value
return b
}
@@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With
// WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenScaled field is set to the value of the last call.
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
+func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
b.WhenScaled = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
index bebf80c89..952ca0a81 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
@@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct {
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
ServiceName *string `json:"serviceName,omitempty"`
- PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+ PodManagementPolicy *appsv1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
@@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State
// WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodManagementPolicy field is set to the value of the last call.
-func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration {
+func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration {
b.PodManagementPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
index 81d4ba1df..f93db4f79 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
)
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ Type *appsv1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
+func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
index 8150635ee..8c9f08a73 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiautoscalingv1 "k8s.io/api/autoscaling/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
// with apply.
type HorizontalPodAutoscalerApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
}
// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
@@ -58,18 +58,18 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
}
// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
}
-func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
b := &HorizontalPodAutoscalerApplyConfiguration{}
err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.Ho
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string)
// If called multiple times, the Name field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string)
// If called multiple times, the UID field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st
// If called multiple times, the Generation field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon
// overwriting an existing map entries in Labels field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration {
+func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
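
Editorial note (not part of the patch): the hunks above and below only rename import aliases (v1beta2 -> appsv1beta2, etc.) and switch the generated setters from promoted-field access to explicit embedded-struct access; both spellings address the same field, so behavior is unchanged. A minimal, self-contained Go sketch of that equivalence, using hypothetical stand-in types rather than the real client-go ones:

package example

import "fmt"

// Stand-in for the embedded metav1.TypeMetaApplyConfiguration; illustrative only.
type TypeMetaApplyConfiguration struct {
	Kind *string
}

// Stand-in for a generated apply configuration that embeds TypeMeta.
type HPAApplyConfiguration struct {
	TypeMetaApplyConfiguration // embedded, so Kind is promoted onto HPAApplyConfiguration
}

func Demo() {
	b := &HPAApplyConfiguration{}
	kind := "HorizontalPodAutoscaler"

	b.Kind = &kind                            // promoted-field access (style of the old generated code)
	b.TypeMetaApplyConfiguration.Kind = &kind // explicit access (style of the new generated code)

	// Both expressions refer to the same underlying field.
	fmt.Println(b.Kind == b.TypeMetaApplyConfiguration.Kind) // true
}
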
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
index fcb231c3b..8575214e1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
// with apply.
type HorizontalPodAutoscalerStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
- CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"`
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
+ CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
}
// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
@@ -49,7 +49,7 @@ func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithObservedGeneration
// WithLastScaleTime sets the LastScaleTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastScaleTime field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value v1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration {
+func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value metav1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration {
b.LastScaleTime = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
index 40f3db8c5..13ae8e142 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
// with apply.
type ScaleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ScaleStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ScaleStatusApplyConfiguration `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -46,7 +46,7 @@ func Scale() *ScaleApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -54,7 +54,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -63,7 +63,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -72,7 +72,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -81,7 +81,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -90,7 +90,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -99,7 +99,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -108,25 +108,25 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -135,7 +135,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -145,11 +145,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S
// overwriting an existing map entries in Labels field with the same key.
func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -160,11 +160,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -172,13 +172,13 @@ func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *Sc
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -189,14 +189,14 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -219,5 +219,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ScaleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
index e26b530c1..99a5cd4bd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
@@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.Horiz
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string)
// If called multiple times, the Name field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string)
// If called multiple times, the UID field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st
// If called multiple times, the Generation field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64)
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon
// overwriting an existing map entries in Labels field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
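
Editorial note (not part of the patch): since the regenerated builders keep their public With* API, callers are unaffected. As a hedged usage sketch only — the names applyHPALabels, demo-hpa, and npd-example are placeholders introduced here, not anything this vendor update adds — this is the typical way an autoscaling/v2 apply configuration is handed to the typed client's server-side Apply:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
)

// applyHPALabels builds a HorizontalPodAutoscaler apply configuration with the
// generated builders and server-side-applies it. Placeholder names throughout.
func applyHPALabels(ctx context.Context, cs kubernetes.Interface) error {
	hpa := autoscalingv2ac.HorizontalPodAutoscaler("demo-hpa", "default").
		WithLabels(map[string]string{"app": "demo"})

	_, err := cs.AutoscalingV2().HorizontalPodAutoscalers("default").
		Apply(ctx, hpa, metav1.ApplyOptions{FieldManager: "npd-example", Force: true})
	return err
}
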
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
index 844c6dc86..25ea39039 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *autoscalingv2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
@@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
+func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
index b8b735747..f89185c57 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
@@ -19,15 +19,15 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
)
// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
// with apply.
type HPAScalingPolicyApplyConfiguration struct {
- Type *v2.HPAScalingPolicyType `json:"type,omitempty"`
- Value *int32 `json:"value,omitempty"`
- PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
+ Type *autoscalingv2.HPAScalingPolicyType `json:"type,omitempty"`
+ Value *int32 `json:"value,omitempty"`
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
}
// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
@@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration {
+func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
index c7020f77b..6a6a2655f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
@@ -19,14 +19,14 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
)
// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
// with apply.
type HPAScalingRulesApplyConfiguration struct {
StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
- SelectPolicy *v2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
+ SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
}
@@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value
// WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SelectPolicy field is set to the value of the last call.
-func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration {
+func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration {
b.SelectPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
index 89e6b5c68..282b84a44 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
)
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
type MetricSpecApplyConfiguration struct {
- Type *v2.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricSpecApplyConfiguration) WithType(value v2.MetricSourceType) *MetricSpecApplyConfiguration {
+func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
index 86ae3348b..f1204824e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
)
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
type MetricStatusApplyConfiguration struct {
- Type *v2.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricStatusApplyConfiguration) WithType(value v2.MetricSourceType) *MetricStatusApplyConfiguration {
+func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricStatusApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
index bf68a1c34..13d2e9365 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
@@ -19,17 +19,17 @@ limitations under the License.
package v2
import (
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
resource "k8s.io/apimachinery/pkg/api/resource"
)
// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
// with apply.
type MetricTargetApplyConfiguration struct {
- Type *v2.MetricTargetType `json:"type,omitempty"`
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ Type *autoscalingv2.MetricTargetType `json:"type,omitempty"`
+ Value *resource.Quantity `json:"value,omitempty"`
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
@@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricTargetApplyConfiguration) WithType(value v2.MetricTargetType) *MetricTargetApplyConfiguration {
+func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2.MetricTargetType) *MetricTargetApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
index 93e37eaff..51ae84901 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string)
// If called multiple times, the Name field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string)
// If called multiple times, the UID field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st
// If called multiple times, the Generation field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64)
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon
// overwriting an existing map entries in Labels field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
index 8bb82298d..445cd55ae 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v2beta1
import (
- v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *autoscalingv2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
@@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
+func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
index 961e2c5b4..3a5faa3b2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta1
import (
- v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
)
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
type MetricSpecApplyConfiguration struct {
- Type *v2beta1.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricSpecApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricSpecApplyConfiguration {
+func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
index 587b5a1f8..f281e182d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta1
import (
- v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
)
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
type MetricStatusApplyConfiguration struct {
- Type *v2beta1.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricStatusApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricStatusApplyConfiguration {
+func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricStatusApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
index ce666f0f3..19794ff42 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string)
// If called multiple times, the Name field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string)
// If called multiple times, the UID field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st
// If called multiple times, the Generation field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64)
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon
// overwriting an existing map entries in Labels field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values .
func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
index a73e7ebaa..f88869124 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,11 +27,11 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *autoscalingv2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
@@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
+func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
index b799f99e0..2bbbbddec 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
@@ -19,15 +19,15 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)
// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
// with apply.
type HPAScalingPolicyApplyConfiguration struct {
- Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"`
- Value *int32 `json:"value,omitempty"`
- PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
+ Type *autoscalingv2beta2.HPAScalingPolicyType `json:"type,omitempty"`
+ Value *int32 `json:"value,omitempty"`
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
}
// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
@@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration {
+func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
index f7e8d9ae3..92aa449aa 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
@@ -19,15 +19,15 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)
// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
// with apply.
type HPAScalingRulesApplyConfiguration struct {
- StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
- SelectPolicy *v2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
- Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
+ StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
+ SelectPolicy *autoscalingv2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
+ Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
}
// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
@@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value
// WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SelectPolicy field is set to the value of the last call.
-func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration {
+func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration {
b.SelectPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
index 3ec710861..3da1617cf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
type MetricSpecApplyConfiguration struct {
- Type *v2beta2.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricSpecApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricSpecApplyConfiguration {
+func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
index 40d32795b..b528bd760 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
type MetricStatusApplyConfiguration struct {
- Type *v2beta2.MetricSourceType `json:"type,omitempty"`
+ Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
@@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricStatusApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricStatusApplyConfiguration {
+func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricStatusApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
index aeec3102e..286856d82 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
@@ -19,17 +19,17 @@ limitations under the License.
package v2beta2
import (
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
resource "k8s.io/apimachinery/pkg/api/resource"
)
// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
// with apply.
type MetricTargetApplyConfiguration struct {
- Type *v2beta2.MetricTargetType `json:"type,omitempty"`
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ Type *autoscalingv2beta2.MetricTargetType `json:"type,omitempty"`
+ Value *resource.Quantity `json:"value,omitempty"`
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
@@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MetricTargetApplyConfiguration) WithType(value v2beta2.MetricTargetType) *MetricTargetApplyConfiguration {
+func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2beta2.MetricTargetType) *MetricTargetApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
index 8b26816e5..f96cba1c5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apibatchv1 "k8s.io/api/batch/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use
// with apply.
type CronJobApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
}
// CronJob constructs a declarative configuration of the CronJob type for use with
@@ -58,18 +58,18 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCronJob(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+func ExtractCronJob(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
return extractCronJob(cronJob, fieldManager, "")
}
// ExtractCronJobStatus is the same as ExtractCronJob except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCronJobStatus(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+func ExtractCronJobStatus(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
return extractCronJob(cronJob, fieldManager, "status")
}
-func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
+func extractCronJob(cronJob *batchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
b := &CronJobApplyConfiguration{}
err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresourc
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration {
+func (b *CronJobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration {
+func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ
// overwriting an existing map entries in Annotations field with the same key.
func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration {
+func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *CronJobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CronJobApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
index 62f9b5298..f53d140d3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
)
// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
@@ -28,7 +28,7 @@ type CronJobSpecApplyConfiguration struct {
Schedule *string `json:"schedule,omitempty"`
TimeZone *string `json:"timeZone,omitempty"`
StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
- ConcurrencyPolicy *v1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+ ConcurrencyPolicy *batchv1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
Suspend *bool `json:"suspend,omitempty"`
JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
@@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64)
// WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConcurrencyPolicy field is set to the value of the last call.
-func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration {
+func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration {
b.ConcurrencyPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
index 095dfe017..d29d9e892 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
@@ -20,15 +20,15 @@ package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- v1 "k8s.io/client-go/applyconfigurations/core/v1"
+ corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)
// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use
// with apply.
type CronJobStatusApplyConfiguration struct {
- Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
- LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
- LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
+ Active []corev1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
+ LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
+ LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
}
// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with
@@ -40,7 +40,7 @@ func CronJobStatus() *CronJobStatusApplyConfiguration {
// WithActive adds the given value to the Active field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Active field.
-func (b *CronJobStatusApplyConfiguration) WithActive(values ...*v1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration {
+func (b *CronJobStatusApplyConfiguration) WithActive(values ...*corev1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithActive")
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
index 1333e9184..e508f1441 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apibatchv1 "k8s.io/api/batch/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// JobApplyConfiguration represents a declarative configuration of the Job type for use
// with apply.
type JobApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
- Status *JobStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *JobStatusApplyConfiguration `json:"status,omitempty"`
}
// Job constructs a declarative configuration of the Job type for use with
@@ -58,18 +58,18 @@ func Job(name, namespace string) *JobApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractJob(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
+func ExtractJob(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
return extractJob(job, fieldManager, "")
}
// ExtractJobStatus is the same as ExtractJob except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractJobStatus(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
+func ExtractJobStatus(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
return extractJob(job, fieldManager, "status")
}
-func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) {
+func extractJob(job *batchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) {
b := &JobApplyConfiguration{}
err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfigurat
// If called multiple times, the Name field is set to the value of the last call.
func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration {
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfigur
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfigurati
// If called multiple times, the UID field is set to the value of the last call.
func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfi
// If called multiple times, the Generation field is set to the value of the last call.
func (b *JobApplyConfiguration) WithGeneration(value int64) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *JobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobApplyConfiguration {
+func (b *JobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApplyConfiguration {
+func (b *JobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApp
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Job
// overwriting an existing map entries in Labels field with the same key.
func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyC
// overwriting an existing map entries in Annotations field with the same key.
func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobA
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration {
+func (b *JobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference
func (b *JobApplyConfiguration) WithFinalizers(values ...string) *JobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *JobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) *
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *JobApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
index 4f15bc604..fb3c65aba 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use
// with apply.
type JobConditionApplyConfiguration struct {
- Type *v1.JobConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *batchv1.JobConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with
@@ -44,7 +44,7 @@ func JobCondition() *JobConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *JobConditionApplyConfiguration) WithType(value v1.JobConditionType) *JobConditionApplyConfiguration {
+func (b *JobConditionApplyConfiguration) WithType(value batchv1.JobConditionType) *JobConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
index 901c4228e..b9666b03d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use
// with apply.
type JobTemplateSpecApplyConfiguration struct {
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
}
// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with
@@ -42,7 +42,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration {
// If called multiple times, the Name field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -51,7 +51,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -60,7 +60,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -69,7 +69,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp
// If called multiple times, the UID field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -78,7 +78,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -87,25 +87,25 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J
// If called multiple times, the Generation field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration {
+func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration {
+func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -114,7 +114,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -124,11 +124,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -139,11 +139,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string
// overwriting an existing map entries in Annotations field with the same key.
func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -151,13 +151,13 @@ func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration {
+func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -168,14 +168,14 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow
func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *JobTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -190,5 +190,5 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *JobTemplateSpecApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
index cd32296ca..aa4dfc4c1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
)
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use
// with apply.
type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct {
- ContainerName *string `json:"containerName,omitempty"`
- Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"`
- Values []int32 `json:"values,omitempty"`
+ ContainerName *string `json:"containerName,omitempty"`
+ Operator *batchv1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"`
+ Values []int32 `json:"values,omitempty"`
}
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with
@@ -47,7 +47,7 @@ func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainer
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
-func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
+func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value batchv1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration {
b.Operator = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
index 07af4fb0e..6459a6e59 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use
// with apply.
type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct {
- Type *v1.PodConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
+ Type *corev1.PodConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
}
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with
@@ -38,7 +38,7 @@ func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPa
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
+func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value corev1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
b.Type = &value
return b
}
@@ -46,7 +46,7 @@ func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(valu
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
+func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
index b004921d3..847ec7c95 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
)
// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use
// with apply.
type PodFailurePolicyRuleApplyConfiguration struct {
- Action *v1.PodFailurePolicyAction `json:"action,omitempty"`
+ Action *batchv1.PodFailurePolicyAction `json:"action,omitempty"`
OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"`
OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"`
}
@@ -39,7 +39,7 @@ func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration {
// WithAction sets the Action field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Action field is set to the value of the last call.
-func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration {
+func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value batchv1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration {
b.Action = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
index 765ed5e65..133ed36fa 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
@@ -87,7 +87,7 @@ func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresou
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyCon
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Cr
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ
// overwriting an existing map entries in Annotations field with the same key.
func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CronJobApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
index 21043690d..30604ac7e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/batch/v1beta1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
)
// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
@@ -28,7 +28,7 @@ type CronJobSpecApplyConfiguration struct {
Schedule *string `json:"schedule,omitempty"`
TimeZone *string `json:"timeZone,omitempty"`
StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
- ConcurrencyPolicy *v1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+ ConcurrencyPolicy *batchv1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
Suspend *bool `json:"suspend,omitempty"`
JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
@@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64)
// WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConcurrencyPolicy field is set to the value of the last call.
-func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration {
+func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration {
b.ConcurrencyPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
index 5fd2485c6..4106b8e55 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
@@ -43,7 +43,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration {
// If called multiple times, the Name field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -52,7 +52,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -61,7 +61,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -70,7 +70,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp
// If called multiple times, the UID field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -79,7 +79,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -88,7 +88,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J
// If called multiple times, the Generation field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -97,7 +97,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -106,7 +106,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.T
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -115,7 +115,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -125,11 +125,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -140,11 +140,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string
// overwriting an existing map entries in Annotations field with the same key.
func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -158,7 +158,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -169,7 +169,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow
func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -191,5 +191,5 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *JobTemplateSpecApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
index e30bb6242..998e5723c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicertificatesv1 "k8s.io/api/certificates/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ certificatesv1 "k8s.io/api/certificates/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use
// with apply.
type CertificateSigningRequestApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
}
// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with
@@ -57,18 +57,18 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
}
// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCertificateSigningRequestStatus(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
}
-func extractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
+func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
b := &CertificateSigningRequestApplyConfiguration{}
err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *apicertificates
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin
// If called multiple times, the Name field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration {
+func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration {
+func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries ma
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration {
+func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values
func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *CertificateSigningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CertificateSigningRequestApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
index 7a4bfce01..a6dedcb59 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/certificates/v1"
+ certificatesv1 "k8s.io/api/certificates/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
// with apply.
type CertificateSigningRequestConditionApplyConfiguration struct {
- Type *v1.RequestConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Type *certificatesv1.RequestConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
@@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration {
+func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
index 9c4a85693..82da53c9e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/certificates/v1"
+ certificatesv1 "k8s.io/api/certificates/v1"
)
// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
- Usages []v1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]v1.ExtraValue `json:"extra,omitempty"`
+ Request []byte `json:"request,omitempty"`
+ SignerName *string `json:"signerName,omitempty"`
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ Usages []certificatesv1.KeyUsage `json:"usages,omitempty"`
+ Username *string `json:"username,omitempty"`
+ UID *string `json:"uid,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ Extra map[string]certificatesv1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
@@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(
// WithUsages adds the given value to the Usages field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Usages field.
-func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration {
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration {
for i := range values {
b.Usages = append(b.Usages, values[i])
}
@@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Extra field,
// overwriting an existing map entries in Extra field with the same key.
-func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration {
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration {
if b.Extra == nil && len(entries) > 0 {
- b.Extra = make(map[string]v1.ExtraValue, len(entries))
+ b.Extra = make(map[string]certificatesv1.ExtraValue, len(entries))
}
for k, v := range entries {
b.Extra[k] = v
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
index 9cd10bc56..6ae6b269d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
@@ -84,7 +84,7 @@ func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterT
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTr
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *Clu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTr
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *Clus
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *Cluster
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *Clus
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterTrustBundleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
index d6e08824a..a1f57f268 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
@@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1b
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin
// If called multiple times, the Name field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(valu
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values
func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CertificateSigningRequestApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
index 6e3692d1c..a845ec404 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/certificates/v1beta1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,12 +27,12 @@ import (
// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
// with apply.
type CertificateSigningRequestConditionApplyConfiguration struct {
- Type *v1beta1.RequestConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Type *certificatesv1beta1.RequestConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
@@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration {
+func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
index 9284eca3a..ee4016c76 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/certificates/v1beta1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
)
// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
- Usages []v1beta1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
+ Request []byte `json:"request,omitempty"`
+ SignerName *string `json:"signerName,omitempty"`
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ Usages []certificatesv1beta1.KeyUsage `json:"usages,omitempty"`
+ Username *string `json:"username,omitempty"`
+ UID *string `json:"uid,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ Extra map[string]certificatesv1beta1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
@@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(
// WithUsages adds the given value to the Usages field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Usages field.
-func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration {
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration {
for i := range values {
b.Usages = append(b.Usages, values[i])
}
@@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Extra field,
// overwriting an existing map entries in Extra field with the same key.
-func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration {
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration {
if b.Extra == nil && len(entries) > 0 {
- b.Extra = make(map[string]v1beta1.ExtraValue, len(entries))
+ b.Extra = make(map[string]certificatesv1beta1.ExtraValue, len(entries))
}
for k, v := range entries {
b.Extra[k] = v
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
index ffd84583f..2a69e773c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apicoordinationv1 "k8s.io/api/coordination/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ coordinationv1 "k8s.io/api/coordination/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use
// with apply.
type LeaseApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
}
// Lease constructs a declarative configuration of the Lease type for use with
@@ -57,18 +57,18 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractLease(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+func ExtractLease(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
return extractLease(lease, fieldManager, "")
}
// ExtractLeaseStatus is the same as ExtractLease except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractLeaseStatus(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+func ExtractLeaseStatus(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
return extractLease(lease, fieldManager, "status")
}
-func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
+func extractLease(lease *coordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
b := &LeaseApplyConfiguration{}
err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresour
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration {
+func (b *LeaseApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration {
+func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L
// overwriting an existing map entries in Labels field with the same key.
func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *Le
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration {
+func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *LeaseApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -251,5 +251,5 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) *
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *LeaseApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
index 01d0df138..d0099872c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
@@ -20,7 +20,7 @@ package v1
import (
coordinationv1 "k8s.io/api/coordination/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use
@@ -28,8 +28,8 @@ import (
type LeaseSpecApplyConfiguration struct {
HolderIdentity *string `json:"holderIdentity,omitempty"`
LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"`
- AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"`
- RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
+ AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty"`
+ RenewTime *metav1.MicroTime `json:"renewTime,omitempty"`
LeaseTransitions *int32 `json:"leaseTransitions,omitempty"`
Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
PreferredHolder *string `json:"preferredHolder,omitempty"`
@@ -60,7 +60,7 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseDurationSeconds(value int32) *Lea
// WithAcquireTime sets the AcquireTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AcquireTime field is set to the value of the last call.
-func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *LeaseSpecApplyConfiguration {
+func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration {
b.AcquireTime = &value
return b
}
@@ -68,7 +68,7 @@ func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *Lease
// WithRenewTime sets the RenewTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RenewTime field is set to the value of the last call.
-func (b *LeaseSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseSpecApplyConfiguration {
+func (b *LeaseSpecApplyConfiguration) WithRenewTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration {
b.RenewTime = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
similarity index 87%
rename from vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go
rename to vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
index ef7684779..b2cc2338e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
import (
- coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
+ coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -42,7 +42,7 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration {
b.WithName(name)
b.WithNamespace(namespace)
b.WithKind("LeaseCandidate")
- b.WithAPIVersion("coordination.k8s.io/v1alpha1")
+ b.WithAPIVersion("coordination.k8s.io/v1alpha2")
return b
}
@@ -57,20 +57,20 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
+func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
return extractLeaseCandidate(leaseCandidate, fieldManager, "")
}
// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
+func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
return extractLeaseCandidate(leaseCandidate, fieldManager, "status")
}
-func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
+func extractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
b := &LeaseCandidateApplyConfiguration{}
- err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource)
+ err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha2.LeaseCandidate"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate,
b.WithNamespace(leaseCandidate.Namespace)
b.WithKind("LeaseCandidate")
- b.WithAPIVersion("coordination.k8s.io/v1alpha1")
+ b.WithAPIVersion("coordination.k8s.io/v1alpha2")
return b, nil
}
@@ -86,7 +86,7 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCa
// If called multiple times, the Name field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidat
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *Lease
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCan
// If called multiple times, the UID field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandid
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *Le
// If called multiple times, the Generation field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCan
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Ti
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Ti
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.Own
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.Own
func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -251,5 +251,5 @@ func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApp
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *LeaseCandidateApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
similarity index 75%
rename from vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go
rename to vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
index 61d3dca10..f52aaab24 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
import (
coordinationv1 "k8s.io/api/coordination/v1"
@@ -26,12 +26,12 @@ import (
// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use
// with apply.
type LeaseCandidateSpecApplyConfiguration struct {
- LeaseName *string `json:"leaseName,omitempty"`
- PingTime *v1.MicroTime `json:"pingTime,omitempty"`
- RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
- BinaryVersion *string `json:"binaryVersion,omitempty"`
- EmulationVersion *string `json:"emulationVersion,omitempty"`
- PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"`
+ LeaseName *string `json:"leaseName,omitempty"`
+ PingTime *v1.MicroTime `json:"pingTime,omitempty"`
+ RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
+ BinaryVersion *string `json:"binaryVersion,omitempty"`
+ EmulationVersion *string `json:"emulationVersion,omitempty"`
+ Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
}
// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with
@@ -80,12 +80,10 @@ func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string
return b
}
-// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the PreferredStrategies field.
-func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration {
- for i := range values {
- b.PreferredStrategies = append(b.PreferredStrategies, values[i])
- }
+// WithStrategy sets the Strategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Strategy field is set to the value of the last call.
+func (b *LeaseCandidateSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration {
+ b.Strategy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
index 9aa0703e8..b321fe6b4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
@@ -86,7 +86,7 @@ func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subreso
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfigu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Leas
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L
// overwriting an existing map entries in Labels field with the same key.
func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -251,5 +251,5 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) *
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *LeaseApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
index 1d698fd61..3f7de21b3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use
// with apply.
type AppArmorProfileApplyConfiguration struct {
- Type *v1.AppArmorProfileType `json:"type,omitempty"`
- LocalhostProfile *string `json:"localhostProfile,omitempty"`
+ Type *corev1.AppArmorProfileType `json:"type,omitempty"`
+ LocalhostProfile *string `json:"localhostProfile,omitempty"`
}
// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with
@@ -38,7 +38,7 @@ func AppArmorProfile() *AppArmorProfileApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *AppArmorProfileApplyConfiguration) WithType(value v1.AppArmorProfileType) *AppArmorProfileApplyConfiguration {
+func (b *AppArmorProfileApplyConfiguration) WithType(value corev1.AppArmorProfileType) *AppArmorProfileApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
index e4c2fff3f..2c76161a1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use
// with apply.
type AttachedVolumeApplyConfiguration struct {
- Name *v1.UniqueVolumeName `json:"name,omitempty"`
- DevicePath *string `json:"devicePath,omitempty"`
+ Name *corev1.UniqueVolumeName `json:"name,omitempty"`
+ DevicePath *string `json:"devicePath,omitempty"`
}
// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with
@@ -38,7 +38,7 @@ func AttachedVolume() *AttachedVolumeApplyConfiguration {
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
-func (b *AttachedVolumeApplyConfiguration) WithName(value v1.UniqueVolumeName) *AttachedVolumeApplyConfiguration {
+func (b *AttachedVolumeApplyConfiguration) WithName(value corev1.UniqueVolumeName) *AttachedVolumeApplyConfiguration {
b.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
index 40ad5ac78..d4d20dfa9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use
// with apply.
type AzureDiskVolumeSourceApplyConfiguration struct {
- DiskName *string `json:"diskName,omitempty"`
- DataDiskURI *string `json:"diskURI,omitempty"`
- CachingMode *v1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Kind *v1.AzureDataDiskKind `json:"kind,omitempty"`
+ DiskName *string `json:"diskName,omitempty"`
+ DataDiskURI *string `json:"diskURI,omitempty"`
+ CachingMode *corev1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"`
+ FSType *string `json:"fsType,omitempty"`
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ Kind *corev1.AzureDataDiskKind `json:"kind,omitempty"`
}
// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with
@@ -58,7 +58,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithDataDiskURI(value string)
// WithCachingMode sets the CachingMode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CachingMode field is set to the value of the last call.
-func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value v1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration {
+func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value corev1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration {
b.CachingMode = &value
return b
}
@@ -82,7 +82,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithReadOnly(value bool) *Azur
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value v1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration {
+func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value corev1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration {
b.Kind = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
index 1c463aef5..e5c52b3c1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use
// with apply.
type CapabilitiesApplyConfiguration struct {
- Add []v1.Capability `json:"add,omitempty"`
- Drop []v1.Capability `json:"drop,omitempty"`
+ Add []corev1.Capability `json:"add,omitempty"`
+ Drop []corev1.Capability `json:"drop,omitempty"`
}
// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with
@@ -38,7 +38,7 @@ func Capabilities() *CapabilitiesApplyConfiguration {
// WithAdd adds the given value to the Add field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Add field.
-func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *CapabilitiesApplyConfiguration {
+func (b *CapabilitiesApplyConfiguration) WithAdd(values ...corev1.Capability) *CapabilitiesApplyConfiguration {
for i := range values {
b.Add = append(b.Add, values[i])
}
@@ -48,7 +48,7 @@ func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *Capab
// WithDrop adds the given value to the Drop field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Drop field.
-func (b *CapabilitiesApplyConfiguration) WithDrop(values ...v1.Capability) *CapabilitiesApplyConfiguration {
+func (b *CapabilitiesApplyConfiguration) WithDrop(values ...corev1.Capability) *CapabilitiesApplyConfiguration {
for i := range values {
b.Drop = append(b.Drop, values[i])
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
index bcfbac63e..ab1c578c8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use
// with apply.
type ClusterTrustBundleProjectionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
- Optional *bool `json:"optional,omitempty"`
- Path *string `json:"path,omitempty"`
+ Name *string `json:"name,omitempty"`
+ SignerName *string `json:"signerName,omitempty"`
+ LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
+ Optional *bool `json:"optional,omitempty"`
+ Path *string `json:"path,omitempty"`
}
// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with
@@ -57,7 +57,7 @@ func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value st
// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LabelSelector field is set to the value of the last call.
-func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration {
+func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration {
b.LabelSelector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
index 0044c7c0b..60be6fe80 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use
// with apply.
type ComponentConditionApplyConfiguration struct {
- Type *v1.ComponentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- Message *string `json:"message,omitempty"`
- Error *string `json:"error,omitempty"`
+ Type *corev1.ComponentConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Error *string `json:"error,omitempty"`
}
// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with
@@ -40,7 +40,7 @@ func ComponentCondition() *ComponentConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentConditionType) *ComponentConditionApplyConfiguration {
+func (b *ComponentConditionApplyConfiguration) WithType(value corev1.ComponentConditionType) *ComponentConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -48,7 +48,7 @@ func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentCondit
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ComponentConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ComponentConditionApplyConfiguration {
+func (b *ComponentConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ComponentConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
index 195bde721..340a55e2d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use
// with apply.
type ComponentStatusApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with
@@ -56,18 +56,18 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
+func ExtractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
return extractComponentStatus(componentStatus, fieldManager, "")
}
// ExtractComponentStatusStatus is the same as ExtractComponentStatus except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractComponentStatusStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
+func ExtractComponentStatusStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
return extractComponentStatus(componentStatus, fieldManager, "status")
}
-func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) {
+func extractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) {
b := &ComponentStatusApplyConfiguration{}
err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldMan
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentStatusApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentSta
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *ComponentStatusApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *Compon
// If called multiple times, the Name field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentSta
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *Comp
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *Compone
// If called multiple times, the UID field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentS
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *C
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithGeneration(value int64) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration {
+func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration {
+func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.T
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string
// overwriting an existing map entries in Annotations field with the same key.
func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]s
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration {
+func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.Ow
func (b *ComponentStatusApplyConfiguration) WithFinalizers(values ...string) *ComponentStatusApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ComponentStatusApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -254,5 +254,5 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ComponentStatusApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
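
The switch from the promoted form (b.Kind) to the explicit selector (b.TypeMetaApplyConfiguration.Kind) is behavior-preserving: both name the same embedded field, the generator now just spells the selector out. A minimal sketch with stand-in types, not the client-go ones:

package main

import "fmt"

// TypeMeta stands in for metav1.TypeMetaApplyConfiguration.
type TypeMeta struct{ Kind *string }

// ComponentStatus stands in for ComponentStatusApplyConfiguration.
type ComponentStatus struct {
	TypeMeta // embedded, so Kind is promoted onto ComponentStatus
}

func main() {
	v := "ComponentStatus"
	b := &ComponentStatus{}
	b.Kind = &v          // promoted access (old generated form)
	b.TypeMeta.Kind = &v // explicit selector (new generated form); same field
	fmt.Println(*b.Kind == *b.TypeMeta.Kind) // true
}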
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
index 576b7a3d6..2ff2c4d20 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
@@ -20,21 +20,21 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use
// with apply.
type ConfigMapApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Immutable *bool `json:"immutable,omitempty"`
- Data map[string]string `json:"data,omitempty"`
- BinaryData map[string][]byte `json:"binaryData,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Immutable *bool `json:"immutable,omitempty"`
+ Data map[string]string `json:"data,omitempty"`
+ BinaryData map[string][]byte `json:"binaryData,omitempty"`
}
// ConfigMap constructs a declarative configuration of the ConfigMap type for use with
@@ -88,7 +88,7 @@ func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresou
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,25 +150,25 @@ func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigM
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithGeneration(value int64) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConfigMapApplyConfiguration {
+func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConfigMapApplyConfiguration {
+func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *Con
// overwriting an existing map entries in Annotations field with the same key.
func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -214,13 +214,13 @@ func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration {
+func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,14 +231,14 @@ func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *ConfigMapApplyConfiguration) WithFinalizers(values ...string) *ConfigMapApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ConfigMapApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -281,5 +281,5 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte)
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ConfigMapApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
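
The ConfigMap builder above is the input to server-side apply; a sketch under assumed wiring (the clientset parameter, namespace, and field-manager string are placeholders, not values from this change):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyConfigMap(ctx context.Context, cs kubernetes.Interface) error {
	// Only the fields set on the builder are sent and owned by this manager.
	cm := corev1ac.ConfigMap("example-config", "default").
		WithLabels(map[string]string{"app": "demo"}).
		WithData(map[string]string{"key": "value"})
	_, err := cs.CoreV1().ConfigMaps("default").
		Apply(ctx, cm, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}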
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
index b1fccd700..4c0d2cbdd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
@@ -35,7 +35,7 @@ func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ConfigMapEnvSourceApplyConfiguration) WithName(value string) *ConfigMapEnvSourceApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
index 26c2a75b5..97c0e7210 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
@@ -36,7 +36,7 @@ func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ConfigMapKeySelectorApplyConfiguration) WithName(value string) *ConfigMapKeySelectorApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
index 308b28f57..d8c5e21d3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
@@ -36,7 +36,7 @@ func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ConfigMapProjectionApplyConfiguration) WithName(value string) *ConfigMapProjectionApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
index 8e0e8dc0f..b5f410397 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
@@ -37,7 +37,7 @@ func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *ConfigMapVolumeSourceApplyConfiguration) WithName(value string) *ConfigMapVolumeSourceApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
index 7acc0638f..2ad47b3a9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use
// with apply.
type ContainerPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- HostPort *int32 `json:"hostPort,omitempty"`
- ContainerPort *int32 `json:"containerPort,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- HostIP *string `json:"hostIP,omitempty"`
+ Name *string `json:"name,omitempty"`
+ HostPort *int32 `json:"hostPort,omitempty"`
+ ContainerPort *int32 `json:"containerPort,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ HostIP *string `json:"hostIP,omitempty"`
}
// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with
@@ -65,7 +65,7 @@ func (b *ContainerPortApplyConfiguration) WithContainerPort(value int32) *Contai
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *ContainerPortApplyConfiguration) WithProtocol(value v1.Protocol) *ContainerPortApplyConfiguration {
+func (b *ContainerPortApplyConfiguration) WithProtocol(value corev1.Protocol) *ContainerPortApplyConfiguration {
b.Protocol = &value
return b
}
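
The corev1 alias introduced above covers typed enum values such as corev1.Protocol; a small sketch with assumed port name and number:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// httpPort builds a ContainerPort apply configuration; the typed
// corev1.Protocol argument is what the renamed import refers to.
func httpPort() *corev1ac.ContainerPortApplyConfiguration {
	return corev1ac.ContainerPort().
		WithName("http").
		WithContainerPort(8080).
		WithProtocol(corev1.ProtocolTCP)
}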
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
index ea60e3d98..d45dbceaf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use
// with apply.
type ContainerResizePolicyApplyConfiguration struct {
- ResourceName *v1.ResourceName `json:"resourceName,omitempty"`
- RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"`
+ ResourceName *corev1.ResourceName `json:"resourceName,omitempty"`
+ RestartPolicy *corev1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"`
}
// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with
@@ -38,7 +38,7 @@ func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration {
// WithResourceName sets the ResourceName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceName field is set to the value of the last call.
-func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration {
+func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value corev1.ResourceName) *ContainerResizePolicyApplyConfiguration {
b.ResourceName = &value
return b
}
@@ -46,7 +46,7 @@ func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.Reso
// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RestartPolicy field is set to the value of the last call.
-func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration {
+func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value corev1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration {
b.RestartPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
index 6eec9f7f2..0ed59c177 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use
// with apply.
type ContainerStateRunningApplyConfiguration struct {
- StartedAt *v1.Time `json:"startedAt,omitempty"`
+ StartedAt *metav1.Time `json:"startedAt,omitempty"`
}
// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with
@@ -37,7 +37,7 @@ func ContainerStateRunning() *ContainerStateRunningApplyConfiguration {
// WithStartedAt sets the StartedAt field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StartedAt field is set to the value of the last call.
-func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateRunningApplyConfiguration {
+func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateRunningApplyConfiguration {
b.StartedAt = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
index b067aa211..cfadd93c9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use
// with apply.
type ContainerStateTerminatedApplyConfiguration struct {
- ExitCode *int32 `json:"exitCode,omitempty"`
- Signal *int32 `json:"signal,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- StartedAt *v1.Time `json:"startedAt,omitempty"`
- FinishedAt *v1.Time `json:"finishedAt,omitempty"`
- ContainerID *string `json:"containerID,omitempty"`
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ Signal *int32 `json:"signal,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
+ StartedAt *metav1.Time `json:"startedAt,omitempty"`
+ FinishedAt *metav1.Time `json:"finishedAt,omitempty"`
+ ContainerID *string `json:"containerID,omitempty"`
}
// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with
@@ -75,7 +75,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithMessage(value string) *
// WithStartedAt sets the StartedAt field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StartedAt field is set to the value of the last call.
-func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration {
+func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration {
b.StartedAt = &value
return b
}
@@ -83,7 +83,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time
// WithFinishedAt sets the FinishedAt field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FinishedAt field is set to the value of the last call.
-func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration {
+func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration {
b.FinishedAt = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
index a619fdb07..63e9f56ab 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
)
// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use
// with apply.
type EmptyDirVolumeSourceApplyConfiguration struct {
- Medium *v1.StorageMedium `json:"medium,omitempty"`
- SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
+ Medium *corev1.StorageMedium `json:"medium,omitempty"`
+ SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
}
// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with
@@ -39,7 +39,7 @@ func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration {
// WithMedium sets the Medium field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Medium field is set to the value of the last call.
-func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value v1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration {
+func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value corev1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration {
b.Medium = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
index d0d96230c..05ee64ddc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
// with apply.
type EndpointPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Port *int32 `json:"port,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ AppProtocol *string `json:"appProtocol,omitempty"`
}
// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
@@ -56,7 +56,7 @@ func (b *EndpointPortApplyConfiguration) WithPort(value int32) *EndpointPortAppl
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration {
+func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
index 98dc69aaa..d2f910196 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use
// with apply.
type EndpointsApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"`
}
// Endpoints constructs a declarative configuration of the Endpoints type for use with
@@ -57,18 +57,18 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
+func ExtractEndpoints(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
return extractEndpoints(endpoints, fieldManager, "")
}
// ExtractEndpointsStatus is the same as ExtractEndpoints except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractEndpointsStatus(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
+func ExtractEndpointsStatus(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
return extractEndpoints(endpoints, fieldManager, "status")
}
-func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) {
+func extractEndpoints(endpoints *corev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) {
b := &EndpointsApplyConfiguration{}
err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subre
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *Endpoin
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithGeneration(value int64) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointsApplyConfiguration {
+func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointsApplyConfiguration {
+func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *End
// overwriting an existing map entries in Annotations field with the same key.
func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration {
+func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *EndpointsApplyConfiguration) WithFinalizers(values ...string) *EndpointsApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *EndpointsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -256,5 +256,5 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EndpointsApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
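
The Extract helpers shown above recover, from a live object, only the fields a given field manager previously applied, so a later Apply does not take ownership of anything else; a sketch assuming a placeholder manager name:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// ownedEndpoints returns the apply configuration owned by "example-manager".
// The helper is marked Experimental upstream, as the generated comments note.
func ownedEndpoints(live *corev1.Endpoints) (*corev1ac.EndpointsApplyConfiguration, error) {
	return corev1ac.ExtractEndpoints(live, "example-manager")
}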
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
index a15ac6ec3..4b74439fc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
@@ -39,7 +39,7 @@ func EphemeralContainer() *EphemeralContainerApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithName(value string) *EphemeralContainerApplyConfiguration {
- b.Name = &value
+ b.EphemeralContainerCommonApplyConfiguration.Name = &value
return b
}
@@ -47,7 +47,7 @@ func (b *EphemeralContainerApplyConfiguration) WithName(value string) *Ephemeral
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Image field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *EphemeralContainerApplyConfiguration {
- b.Image = &value
+ b.EphemeralContainerCommonApplyConfiguration.Image = &value
return b
}
@@ -56,7 +56,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *Ephemera
// If called multiple times, values provided by each call will be appended to the Command field.
func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *EphemeralContainerApplyConfiguration {
for i := range values {
- b.Command = append(b.Command, values[i])
+ b.EphemeralContainerCommonApplyConfiguration.Command = append(b.EphemeralContainerCommonApplyConfiguration.Command, values[i])
}
return b
}
@@ -66,7 +66,7 @@ func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *Ep
// If called multiple times, values provided by each call will be appended to the Args field.
func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *EphemeralContainerApplyConfiguration {
for i := range values {
- b.Args = append(b.Args, values[i])
+ b.EphemeralContainerCommonApplyConfiguration.Args = append(b.EphemeralContainerCommonApplyConfiguration.Args, values[i])
}
return b
}
@@ -75,7 +75,7 @@ func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *Ephem
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WorkingDir field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithWorkingDir(value string) *EphemeralContainerApplyConfiguration {
- b.WorkingDir = &value
+ b.EphemeralContainerCommonApplyConfiguration.WorkingDir = &value
return b
}
@@ -87,7 +87,7 @@ func (b *EphemeralContainerApplyConfiguration) WithPorts(values ...*ContainerPor
if values[i] == nil {
panic("nil value passed to WithPorts")
}
- b.Ports = append(b.Ports, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.Ports = append(b.EphemeralContainerCommonApplyConfiguration.Ports, *values[i])
}
return b
}
@@ -100,7 +100,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnvFrom(values ...*EnvFromSou
if values[i] == nil {
panic("nil value passed to WithEnvFrom")
}
- b.EnvFrom = append(b.EnvFrom, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.EnvFrom = append(b.EphemeralContainerCommonApplyConfiguration.EnvFrom, *values[i])
}
return b
}
@@ -113,7 +113,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon
if values[i] == nil {
panic("nil value passed to WithEnv")
}
- b.Env = append(b.Env, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.Env = append(b.EphemeralContainerCommonApplyConfiguration.Env, *values[i])
}
return b
}
@@ -122,7 +122,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resources field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.Resources = value
+ b.EphemeralContainerCommonApplyConfiguration.Resources = value
return b
}
@@ -134,7 +134,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta
if values[i] == nil {
panic("nil value passed to WithResizePolicy")
}
- b.ResizePolicy = append(b.ResizePolicy, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.ResizePolicy = append(b.EphemeralContainerCommonApplyConfiguration.ResizePolicy, *values[i])
}
return b
}
@@ -143,7 +143,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RestartPolicy field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration {
- b.RestartPolicy = &value
+ b.EphemeralContainerCommonApplyConfiguration.RestartPolicy = &value
return b
}
@@ -155,7 +155,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeMounts(values ...*Volum
if values[i] == nil {
panic("nil value passed to WithVolumeMounts")
}
- b.VolumeMounts = append(b.VolumeMounts, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.VolumeMounts = append(b.EphemeralContainerCommonApplyConfiguration.VolumeMounts, *values[i])
}
return b
}
@@ -168,7 +168,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu
if values[i] == nil {
panic("nil value passed to WithVolumeDevices")
}
- b.VolumeDevices = append(b.VolumeDevices, *values[i])
+ b.EphemeralContainerCommonApplyConfiguration.VolumeDevices = append(b.EphemeralContainerCommonApplyConfiguration.VolumeDevices, *values[i])
}
return b
}
@@ -177,7 +177,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LivenessProbe field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.LivenessProbe = value
+ b.EphemeralContainerCommonApplyConfiguration.LivenessProbe = value
return b
}
@@ -185,7 +185,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ReadinessProbe field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.ReadinessProbe = value
+ b.EphemeralContainerCommonApplyConfiguration.ReadinessProbe = value
return b
}
@@ -193,7 +193,7 @@ func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeAp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StartupProbe field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.StartupProbe = value
+ b.EphemeralContainerCommonApplyConfiguration.StartupProbe = value
return b
}
@@ -201,7 +201,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeAppl
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Lifecycle field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.Lifecycle = value
+ b.EphemeralContainerCommonApplyConfiguration.Lifecycle = value
return b
}
@@ -209,7 +209,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the TerminationMessagePath field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value string) *EphemeralContainerApplyConfiguration {
- b.TerminationMessagePath = &value
+ b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePath = &value
return b
}
@@ -217,7 +217,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the TerminationMessagePolicy field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(value corev1.TerminationMessagePolicy) *EphemeralContainerApplyConfiguration {
- b.TerminationMessagePolicy = &value
+ b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePolicy = &value
return b
}
@@ -225,7 +225,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(valu
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ImagePullPolicy field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *EphemeralContainerApplyConfiguration {
- b.ImagePullPolicy = &value
+ b.EphemeralContainerCommonApplyConfiguration.ImagePullPolicy = &value
return b
}
@@ -233,7 +233,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1.
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SecurityContext field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *SecurityContextApplyConfiguration) *EphemeralContainerApplyConfiguration {
- b.SecurityContext = value
+ b.EphemeralContainerCommonApplyConfiguration.SecurityContext = value
return b
}
@@ -241,7 +241,7 @@ func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *Securi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Stdin field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralContainerApplyConfiguration {
- b.Stdin = &value
+ b.EphemeralContainerCommonApplyConfiguration.Stdin = &value
return b
}
@@ -249,7 +249,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralC
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StdinOnce field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *EphemeralContainerApplyConfiguration {
- b.StdinOnce = &value
+ b.EphemeralContainerCommonApplyConfiguration.StdinOnce = &value
return b
}
@@ -257,7 +257,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *Epheme
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the TTY field is set to the value of the last call.
func (b *EphemeralContainerApplyConfiguration) WithTTY(value bool) *EphemeralContainerApplyConfiguration {
- b.TTY = &value
+ b.EphemeralContainerCommonApplyConfiguration.TTY = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
index 65d6577ab..9496ea773 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
@@ -19,33 +19,33 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// EventApplyConfiguration represents a declarative configuration of the Event type for use
// with apply.
type EventApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- Source *EventSourceApplyConfiguration `json:"source,omitempty"`
- FirstTimestamp *metav1.Time `json:"firstTimestamp,omitempty"`
- LastTimestamp *metav1.Time `json:"lastTimestamp,omitempty"`
- Count *int32 `json:"count,omitempty"`
- Type *string `json:"type,omitempty"`
- EventTime *metav1.MicroTime `json:"eventTime,omitempty"`
- Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
- Action *string `json:"action,omitempty"`
- Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"`
- ReportingController *string `json:"reportingComponent,omitempty"`
- ReportingInstance *string `json:"reportingInstance,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Source *EventSourceApplyConfiguration `json:"source,omitempty"`
+ FirstTimestamp *apismetav1.Time `json:"firstTimestamp,omitempty"`
+ LastTimestamp *apismetav1.Time `json:"lastTimestamp,omitempty"`
+ Count *int32 `json:"count,omitempty"`
+ Type *string `json:"type,omitempty"`
+ EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
+ Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
+ Action *string `json:"action,omitempty"`
+ Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"`
+ ReportingController *string `json:"reportingComponent,omitempty"`
+ ReportingInstance *string `json:"reportingInstance,omitempty"`
}
// Event constructs a declarative configuration of the Event type for use with
@@ -70,18 +70,18 @@ func Event(name, namespace string) *EventApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractEvent(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+func ExtractEvent(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
return extractEvent(event, fieldManager, "")
}
// ExtractEventStatus is the same as ExtractEvent except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractEventStatus(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+func ExtractEventStatus(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
return extractEvent(event, fieldManager, "status")
}
-func extractEvent(event *apicorev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
+func extractEvent(event *corev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource)
if err != nil {
@@ -99,7 +99,7 @@ func extractEvent(event *apicorev1.Event, fieldManager string, subresource strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -107,7 +107,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -116,7 +116,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -125,7 +125,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -134,7 +134,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -143,7 +143,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -152,7 +152,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -161,25 +161,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -188,7 +188,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -198,11 +198,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E
// overwriting an existing map entries in Labels field with the same key.
func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -213,11 +213,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -225,13 +225,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -242,14 +242,14 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -288,7 +288,7 @@ func (b *EventApplyConfiguration) WithSource(value *EventSourceApplyConfiguratio
// WithFirstTimestamp sets the FirstTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FirstTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.FirstTimestamp = &value
return b
}
@@ -296,7 +296,7 @@ func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventAp
// WithLastTimestamp sets the LastTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithLastTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithLastTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.LastTimestamp = &value
return b
}
@@ -320,7 +320,7 @@ func (b *EventApplyConfiguration) WithType(value string) *EventApplyConfiguratio
// WithEventTime sets the EventTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the EventTime field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration {
b.EventTime = &value
return b
}
@@ -368,5 +368,5 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EventApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
index 18069c0d1..c90954bcc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
// with apply.
type EventSeriesApplyConfiguration struct {
- Count *int32 `json:"count,omitempty"`
- LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
+ Count *int32 `json:"count,omitempty"`
+ LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"`
}
// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with
@@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply
// WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastObservedTime field is set to the value of the last call.
-func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration {
+func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration {
b.LastObservedTime = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
index 10dfedfde..6a41d67cd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use
// with apply.
type HostPathVolumeSourceApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- Type *v1.HostPathType `json:"type,omitempty"`
+ Path *string `json:"path,omitempty"`
+ Type *corev1.HostPathType `json:"type,omitempty"`
}
// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with
@@ -46,7 +46,7 @@ func (b *HostPathVolumeSourceApplyConfiguration) WithPath(value string) *HostPat
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *HostPathVolumeSourceApplyConfiguration) WithType(value v1.HostPathType) *HostPathVolumeSourceApplyConfiguration {
+func (b *HostPathVolumeSourceApplyConfiguration) WithType(value corev1.HostPathType) *HostPathVolumeSourceApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
index 5ecbc27fe..ca61c5ae2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
@@ -29,7 +29,7 @@ type HTTPGetActionApplyConfiguration struct {
Path *string `json:"path,omitempty"`
Port *intstr.IntOrString `json:"port,omitempty"`
Host *string `json:"host,omitempty"`
- Scheme *v1.URIScheme `json:"scheme,omitempty"`
+ Scheme *corev1.URIScheme `json:"scheme,omitempty"`
HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"`
}
@@ -66,7 +66,7 @@ func (b *HTTPGetActionApplyConfiguration) WithHost(value string) *HTTPGetActionA
// WithScheme sets the Scheme field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Scheme field is set to the value of the last call.
-func (b *HTTPGetActionApplyConfiguration) WithScheme(value v1.URIScheme) *HTTPGetActionApplyConfiguration {
+func (b *HTTPGetActionApplyConfiguration) WithScheme(value corev1.URIScheme) *HTTPGetActionApplyConfiguration {
b.Scheme = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
index 340f15040..9a146e685 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use
// with apply.
type ImageVolumeSourceApplyConfiguration struct {
- Reference *string `json:"reference,omitempty"`
- PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"`
+ Reference *string `json:"reference,omitempty"`
+ PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty"`
}
// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with
@@ -46,7 +46,7 @@ func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *Image
// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PullPolicy field is set to the value of the last call.
-func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration {
+func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value corev1.PullPolicy) *ImageVolumeSourceApplyConfiguration {
b.PullPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
index 7770200a0..517cc4cd3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use
// with apply.
type LimitRangeApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"`
}
// LimitRange constructs a declarative configuration of the LimitRange type for use with
@@ -57,18 +57,18 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
+func ExtractLimitRange(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
return extractLimitRange(limitRange, fieldManager, "")
}
// ExtractLimitRangeStatus is the same as ExtractLimitRange except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractLimitRangeStatus(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
+func ExtractLimitRangeStatus(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
return extractLimitRange(limitRange, fieldManager, "status")
}
-func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) {
+func extractLimitRange(limitRange *corev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) {
b := &LimitRangeApplyConfiguration{}
err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, su
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeA
// If called multiple times, the Name field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRang
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitR
// If called multiple times, the Generation field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithGeneration(value int64) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LimitRangeApplyConfiguration {
+func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LimitRangeApplyConfiguration {
+func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *Li
// overwriting an existing map entries in Annotations field with the same key.
func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration {
+func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *LimitRangeApplyConfiguration) WithFinalizers(values ...string) *LimitRangeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *LimitRangeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -251,5 +251,5 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *LimitRangeApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
index 61d8344e8..5ad8ac0e6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use
// with apply.
type LimitRangeItemApplyConfiguration struct {
- Type *v1.LimitType `json:"type,omitempty"`
- Max *v1.ResourceList `json:"max,omitempty"`
- Min *v1.ResourceList `json:"min,omitempty"`
- Default *v1.ResourceList `json:"default,omitempty"`
- DefaultRequest *v1.ResourceList `json:"defaultRequest,omitempty"`
- MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"`
+ Type *corev1.LimitType `json:"type,omitempty"`
+ Max *corev1.ResourceList `json:"max,omitempty"`
+ Min *corev1.ResourceList `json:"min,omitempty"`
+ Default *corev1.ResourceList `json:"default,omitempty"`
+ DefaultRequest *corev1.ResourceList `json:"defaultRequest,omitempty"`
+ MaxLimitRequestRatio *corev1.ResourceList `json:"maxLimitRequestRatio,omitempty"`
}
// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with
@@ -42,7 +42,7 @@ func LimitRangeItem() *LimitRangeItemApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithType(value corev1.LimitType) *LimitRangeItemApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRa
// WithMax sets the Max field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Max field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithMax(value corev1.ResourceList) *LimitRangeItemApplyConfiguration {
b.Max = &value
return b
}
@@ -58,7 +58,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *Limit
// WithMin sets the Min field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Min field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithMin(value corev1.ResourceList) *LimitRangeItemApplyConfiguration {
b.Min = &value
return b
}
@@ -66,7 +66,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *Limit
// WithDefault sets the Default field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Default field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithDefault(value corev1.ResourceList) *LimitRangeItemApplyConfiguration {
b.Default = &value
return b
}
@@ -74,7 +74,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *L
// WithDefaultRequest sets the DefaultRequest field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DefaultRequest field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceList) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value corev1.ResourceList) *LimitRangeItemApplyConfiguration {
b.DefaultRequest = &value
return b
}
@@ -82,7 +82,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceL
// WithMaxLimitRequestRatio sets the MaxLimitRequestRatio field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MaxLimitRequestRatio field is set to the value of the last call.
-func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value v1.ResourceList) *LimitRangeItemApplyConfiguration {
+func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value corev1.ResourceList) *LimitRangeItemApplyConfiguration {
b.MaxLimitRequestRatio = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
index 1a7d99815..ae5c410a2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use
@@ -27,7 +27,7 @@ import (
type LoadBalancerIngressApplyConfiguration struct {
IP *string `json:"ip,omitempty"`
Hostname *string `json:"hostname,omitempty"`
- IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"`
+ IPMode *corev1.LoadBalancerIPMode `json:"ipMode,omitempty"`
Ports []PortStatusApplyConfiguration `json:"ports,omitempty"`
}
@@ -56,7 +56,7 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load
// WithIPMode sets the IPMode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IPMode field is set to the value of the last call.
-func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration {
+func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value corev1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration {
b.IPMode = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
index 704c32165..9a1a6af2a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use
// with apply.
type ModifyVolumeStatusApplyConfiguration struct {
- TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"`
- Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"`
+ TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"`
+ Status *corev1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"`
}
// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with
@@ -46,7 +46,7 @@ func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassNa
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration {
+func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value corev1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
index 0b77af183..0aba283ce 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use
// with apply.
type NamespaceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"`
- Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"`
}
// Namespace constructs a declarative configuration of the Namespace type for use with
@@ -57,18 +57,18 @@ func Namespace(name string) *NamespaceApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractNamespace(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
+func ExtractNamespace(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
return extractNamespace(namespace, fieldManager, "")
}
// ExtractNamespaceStatus is the same as ExtractNamespace except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractNamespaceStatus(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
+func ExtractNamespaceStatus(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
return extractNamespace(namespace, fieldManager, "status")
}
-func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) {
+func extractNamespace(namespace *corev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) {
b := &NamespaceApplyConfiguration{}
err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subre
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *Namespa
// If called multiple times, the Generation field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithGeneration(value int64) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NamespaceApplyConfiguration {
+func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NamespaceApplyConfiguration {
+func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *Nam
// overwriting an existing map entries in Annotations field with the same key.
func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration {
+func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *NamespaceApplyConfiguration) WithFinalizers(values ...string) *NamespaceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *NamespaceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *NamespaceApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
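These hunks are code-generator churn in the regenerated client-go apply configurations: import aliases become explicit (corev1 for k8s.io/api/core/v1, apismetav1 for apimachinery's meta/v1, metav1 for the applyconfigurations meta/v1 builders), and accesses to fields promoted from the embedded TypeMetaApplyConfiguration/ObjectMetaApplyConfiguration are written out in full. The builder API is unchanged for callers. A minimal usage sketch, assuming a hypothetical namespace name, label, and field-manager string:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyNamespaceLabels server-side-applies one label to a namespace using the
// NamespaceApplyConfiguration builder touched by the hunks above.
func applyNamespaceLabels(ctx context.Context, cs kubernetes.Interface) error {
	// "demo" and the label key/value are hypothetical examples.
	ns := corev1ac.Namespace("demo").
		WithLabels(map[string]string{"example.com/owner": "infra"})
	_, err := cs.CoreV1().Namespaces().Apply(ctx, ns, metav1.ApplyOptions{
		FieldManager: "example-manager", // illustrative field manager
		Force:        true,
	})
	return err
}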
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
index 9784c3e6f..82b4cc1ca 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use
// with apply.
type NamespaceConditionApplyConfiguration struct {
- Type *v1.NamespaceConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *corev1.NamespaceConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with
@@ -42,7 +42,7 @@ func NamespaceCondition() *NamespaceConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceConditionType) *NamespaceConditionApplyConfiguration {
+func (b *NamespaceConditionApplyConfiguration) WithType(value corev1.NamespaceConditionType) *NamespaceConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceCondit
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *NamespaceConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NamespaceConditionApplyConfiguration {
+func (b *NamespaceConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NamespaceConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
index 6d7b7f1f9..1f8fcaf9a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use
// with apply.
type NamespaceSpecApplyConfiguration struct {
- Finalizers []v1.FinalizerName `json:"finalizers,omitempty"`
+ Finalizers []corev1.FinalizerName `json:"finalizers,omitempty"`
}
// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with
@@ -37,7 +37,7 @@ func NamespaceSpec() *NamespaceSpecApplyConfiguration {
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
-func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...v1.FinalizerName) *NamespaceSpecApplyConfiguration {
+func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...corev1.FinalizerName) *NamespaceSpecApplyConfiguration {
for i := range values {
b.Finalizers = append(b.Finalizers, values[i])
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
index 314908109..1484be684 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use
// with apply.
type NamespaceStatusApplyConfiguration struct {
- Phase *v1.NamespacePhase `json:"phase,omitempty"`
+ Phase *corev1.NamespacePhase `json:"phase,omitempty"`
Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"`
}
@@ -38,7 +38,7 @@ func NamespaceStatus() *NamespaceStatusApplyConfiguration {
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
-func (b *NamespaceStatusApplyConfiguration) WithPhase(value v1.NamespacePhase) *NamespaceStatusApplyConfiguration {
+func (b *NamespaceStatusApplyConfiguration) WithPhase(value corev1.NamespacePhase) *NamespaceStatusApplyConfiguration {
b.Phase = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
index ef1339259..d365047b7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NodeApplyConfiguration represents a declarative configuration of the Node type for use
// with apply.
type NodeApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"`
- Status *NodeStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *NodeStatusApplyConfiguration `json:"status,omitempty"`
}
// Node constructs a declarative configuration of the Node type for use with
@@ -57,18 +57,18 @@ func Node(name string) *NodeApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractNode(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+func ExtractNode(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
return extractNode(node, fieldManager, "")
}
// ExtractNodeStatus is the same as ExtractNode except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractNodeStatus(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+func ExtractNodeStatus(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
return extractNode(node, fieldManager, "status")
}
-func extractNode(node *apicorev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
+func extractNode(node *corev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
b := &NodeApplyConfiguration{}
err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractNode(node *apicorev1.Node, fieldManager string, subresource string)
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfigur
// If called multiple times, the Name field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfig
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfigura
// If called multiple times, the UID field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguratio
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyCon
// If called multiple times, the Generation field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *NodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeApplyConfiguration {
+func (b *NodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeApplyConfiguration {
+func (b *NodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeA
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *No
// overwriting an existing map entries in Labels field with the same key.
func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeAppl
// overwriting an existing map entries in Annotations field with the same key.
func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *Nod
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration {
+func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration)
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *NodeApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
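The Node builders changed above are the ones a node agent such as node-problem-detector could use to publish node conditions through server-side apply. A minimal sketch of that flow; the condition type, reason, message, and field-manager name are illustrative assumptions, not values taken from this diff:

package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyNodeCondition sets a single condition on the node's status subresource
// via server-side apply, using the Node/NodeStatus/NodeCondition builders above.
func applyNodeCondition(ctx context.Context, cs kubernetes.Interface, nodeName string) error {
	now := metav1.NewTime(time.Now())
	// Condition type, reason, and message below are hypothetical examples.
	cond := corev1ac.NodeCondition().
		WithType(corev1.NodeConditionType("ExampleProblem")).
		WithStatus(corev1.ConditionFalse).
		WithReason("ExampleProblemNotDetected").
		WithMessage("no problem detected").
		WithLastHeartbeatTime(now).
		WithLastTransitionTime(now)

	node := corev1ac.Node(nodeName).
		WithStatus(corev1ac.NodeStatus().WithConditions(cond))

	_, err := cs.CoreV1().Nodes().ApplyStatus(ctx, node, metav1.ApplyOptions{
		FieldManager: "example-problem-reporter", // illustrative field manager
		Force:        true,
	})
	return err
}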
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
index a9cb036c5..779fe0e2f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use
// with apply.
type NodeAddressApplyConfiguration struct {
- Type *v1.NodeAddressType `json:"type,omitempty"`
- Address *string `json:"address,omitempty"`
+ Type *corev1.NodeAddressType `json:"type,omitempty"`
+ Address *string `json:"address,omitempty"`
}
// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with
@@ -38,7 +38,7 @@ func NodeAddress() *NodeAddressApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *NodeAddressApplyConfiguration) WithType(value v1.NodeAddressType) *NodeAddressApplyConfiguration {
+func (b *NodeAddressApplyConfiguration) WithType(value corev1.NodeAddressType) *NodeAddressApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
index a1b8ed0f3..e3a2d3bb0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use
// with apply.
type NodeConditionApplyConfiguration struct {
- Type *v1.NodeConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *corev1.NodeConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with
@@ -43,7 +43,7 @@ func NodeCondition() *NodeConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) *NodeConditionApplyConfiguration {
+func (b *NodeConditionApplyConfiguration) WithType(value corev1.NodeConditionType) *NodeConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -51,7 +51,7 @@ func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) *
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *NodeConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NodeConditionApplyConfiguration {
+func (b *NodeConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NodeConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
index 7c383e06c..4dcbc9a2e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use
// with apply.
type NodeSelectorRequirementApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *v1.NodeSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ Key *string `json:"key,omitempty"`
+ Operator *corev1.NodeSelectorOperator `json:"operator,omitempty"`
+ Values []string `json:"values,omitempty"`
}
// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with
@@ -47,7 +47,7 @@ func (b *NodeSelectorRequirementApplyConfiguration) WithKey(value string) *NodeS
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
-func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value v1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration {
+func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value corev1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration {
b.Operator = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
index 8411c57ac..3859ccd50 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use
// with apply.
type NodeStatusApplyConfiguration struct {
- Capacity *v1.ResourceList `json:"capacity,omitempty"`
- Allocatable *v1.ResourceList `json:"allocatable,omitempty"`
- Phase *v1.NodePhase `json:"phase,omitempty"`
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
+ Allocatable *corev1.ResourceList `json:"allocatable,omitempty"`
+ Phase *corev1.NodePhase `json:"phase,omitempty"`
Conditions []NodeConditionApplyConfiguration `json:"conditions,omitempty"`
Addresses []NodeAddressApplyConfiguration `json:"addresses,omitempty"`
DaemonEndpoints *NodeDaemonEndpointsApplyConfiguration `json:"daemonEndpoints,omitempty"`
NodeInfo *NodeSystemInfoApplyConfiguration `json:"nodeInfo,omitempty"`
Images []ContainerImageApplyConfiguration `json:"images,omitempty"`
- VolumesInUse []v1.UniqueVolumeName `json:"volumesInUse,omitempty"`
+ VolumesInUse []corev1.UniqueVolumeName `json:"volumesInUse,omitempty"`
VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"`
Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"`
RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"`
@@ -49,7 +49,7 @@ func NodeStatus() *NodeStatusApplyConfiguration {
// WithCapacity sets the Capacity field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Capacity field is set to the value of the last call.
-func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *NodeStatusApplyConfiguration {
+func (b *NodeStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *NodeStatusApplyConfiguration {
b.Capacity = &value
return b
}
@@ -57,7 +57,7 @@ func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *Node
// WithAllocatable sets the Allocatable field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Allocatable field is set to the value of the last call.
-func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *NodeStatusApplyConfiguration {
+func (b *NodeStatusApplyConfiguration) WithAllocatable(value corev1.ResourceList) *NodeStatusApplyConfiguration {
b.Allocatable = &value
return b
}
@@ -65,7 +65,7 @@ func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *N
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
-func (b *NodeStatusApplyConfiguration) WithPhase(value v1.NodePhase) *NodeStatusApplyConfiguration {
+func (b *NodeStatusApplyConfiguration) WithPhase(value corev1.NodePhase) *NodeStatusApplyConfiguration {
b.Phase = &value
return b
}
@@ -128,7 +128,7 @@ func (b *NodeStatusApplyConfiguration) WithImages(values ...*ContainerImageApply
// WithVolumesInUse adds the given value to the VolumesInUse field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumesInUse field.
-func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...v1.UniqueVolumeName) *NodeStatusApplyConfiguration {
+func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...corev1.UniqueVolumeName) *NodeStatusApplyConfiguration {
for i := range values {
b.VolumesInUse = append(b.VolumesInUse, values[i])
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
index 020f87411..6840c1c88 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use
// with apply.
type PersistentVolumeApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"`
}
// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with
@@ -57,18 +57,18 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
+func ExtractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
return extractPersistentVolume(persistentVolume, fieldManager, "")
}
// ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPersistentVolumeStatus(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
+func ExtractPersistentVolumeStatus(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
return extractPersistentVolume(persistentVolume, fieldManager, "status")
}
-func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) {
+func extractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) {
b := &PersistentVolumeApplyConfiguration{}
err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, field
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentVolumeApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentV
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *Persi
// If called multiple times, the Name field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentV
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *Per
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *Persis
// If called multiple times, the UID field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *Persisten
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) *
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithGeneration(value int64) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration {
+func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration {
+func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(valu
// overwriting an existing map entries in Labels field with the same key.
func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string]
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration {
+func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.O
func (b *PersistentVolumeApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PersistentVolumeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PersistentVolumeApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
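The Extract* helpers touched above (ExtractPersistentVolume here, ExtractNode earlier, both marked Experimental in the generated comments) go the other way: given a live object, they return an apply configuration containing only the fields owned by a particular field manager. A sketch under the assumption of a hypothetical field-manager string:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// extractOwnedPVFields fetches a PersistentVolume and returns only the fields
// previously applied by fieldManager, ready to be mutated and re-applied.
func extractOwnedPVFields(ctx context.Context, cs kubernetes.Interface, name, fieldManager string) (*corev1ac.PersistentVolumeApplyConfiguration, error) {
	pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return corev1ac.ExtractPersistentVolume(pv, fieldManager)
}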
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
index 81cf79144..93b8b69d4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use
// with apply.
type PersistentVolumeClaimApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"`
}
// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with
@@ -58,18 +58,18 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
+func ExtractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "")
}
// ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
+func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status")
}
-func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) {
+func extractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) {
b := &PersistentVolumeClaimApplyConfiguration{}
err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVol
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *PersistentVolumeClaimApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *Persis
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeClaimApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) *
// If called multiple times, the Name field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *Persis
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string)
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *P
// If called multiple times, the UID field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *Pers
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value stri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration {
+func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration {
+func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value me
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds
// overwriting an existing map entries in Labels field with the same key.
func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string]
// overwriting an existing map entries in Annotations field with the same key.
func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[st
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration {
+func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...
func (b *PersistentVolumeClaimApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PersistentVolumeClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
index 80038c067..40025d533 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use
// with apply.
type PersistentVolumeClaimConditionApplyConfiguration struct {
- Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *corev1.PersistentVolumeClaimConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with
@@ -43,7 +43,7 @@ func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration {
+func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value corev1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.Per
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration {
+func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
index 5ce671cd9..2c2be16b3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use
// with apply.
type PersistentVolumeClaimSpecApplyConfiguration struct {
- AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
VolumeName *string `json:"volumeName,omitempty"`
StorageClassName *string `json:"storageClassName,omitempty"`
- VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+ VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"`
DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"`
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
@@ -46,7 +46,7 @@ func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration {
// WithAccessModes adds the given value to the AccessModes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the AccessModes field.
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration {
+func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration {
for i := range values {
b.AccessModes = append(b.AccessModes, values[i])
}
@@ -88,7 +88,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithStorageClassName(value
// WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VolumeMode field is set to the value of the last call.
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration {
+func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration {
b.VolumeMode = &value
return b
}
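
The setters above keep their signatures; only the import alias of the parameter types changes from v1 to corev1. A brief usage sketch of the claim-spec builder with illustrative values (the storage class name is made up):

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    )

    // pvcSpecExample chains the builders from this file; access mode and volume
    // mode are passed as the corev1 enum values directly, not as pointers.
    func pvcSpecExample() *corev1ac.PersistentVolumeClaimSpecApplyConfiguration {
        return corev1ac.PersistentVolumeClaimSpec().
            WithAccessModes(corev1.ReadWriteOnce).
            WithVolumeMode(corev1.PersistentVolumeFilesystem).
            WithStorageClassName("standard")
    }
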
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
index 3eebf95ad..6cea23a2c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use
// with apply.
type PersistentVolumeClaimStatusApplyConfiguration struct {
- Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
- AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
- Capacity *v1.ResourceList `json:"capacity,omitempty"`
+ Phase *corev1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"`
- AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"`
- AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"`
+ AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
+ AllocatedResourceStatuses map[corev1.ResourceName]corev1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"`
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"`
ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"`
}
@@ -44,7 +44,7 @@ func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguratio
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration {
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration {
b.Phase = &value
return b
}
@@ -52,7 +52,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.Persi
// WithAccessModes adds the given value to the AccessModes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the AccessModes field.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration {
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration {
for i := range values {
b.AccessModes = append(b.AccessModes, values[i])
}
@@ -62,7 +62,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values .
// WithCapacity sets the Capacity field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Capacity field is set to the value of the last call.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration {
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration {
b.Capacity = &value
return b
}
@@ -83,7 +83,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithConditions(values ..
// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AllocatedResources field is set to the value of the last call.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration {
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration {
b.AllocatedResources = &value
return b
}
@@ -92,9 +92,9 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field,
// overwriting an existing map entries in AllocatedResourceStatuses field with the same key.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration {
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[corev1.ResourceName]corev1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration {
if b.AllocatedResourceStatuses == nil && len(entries) > 0 {
- b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries))
+ b.AllocatedResourceStatuses = make(map[corev1.ResourceName]corev1.ClaimResourceStatus, len(entries))
}
for k, v := range entries {
b.AllocatedResourceStatuses[k] = v
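
The WithAllocatedResourceStatuses doc comment above describes merge-by-key semantics: repeated calls accumulate entries, and a later entry with the same key wins. A hedged sketch (the status strings are illustrative and passed through typed conversions rather than relying on specific constant names):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    )

    func main() {
        st := corev1ac.PersistentVolumeClaimStatus().
            WithAllocatedResourceStatuses(map[corev1.ResourceName]corev1.ClaimResourceStatus{
                corev1.ResourceStorage: corev1.ClaimResourceStatus("ControllerResizeInProgress"),
            }).
            WithAllocatedResourceStatuses(map[corev1.ResourceName]corev1.ClaimResourceStatus{
                corev1.ResourceStorage: corev1.ClaimResourceStatus("NodeResizePending"),
            })

        // The second call overwrote the "storage" entry; other keys would be kept.
        fmt.Println(st.AllocatedResourceStatuses[corev1.ResourceStorage])
    }
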
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
index ed4970291..4db3cbf12 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use
// with apply.
type PersistentVolumeClaimTemplateApplyConfiguration struct {
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
}
// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with
@@ -42,7 +42,7 @@ func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfigur
// If called multiple times, the Name field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string)
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -60,7 +60,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -69,7 +69,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value st
// If called multiple times, the UID field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -78,7 +78,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UI
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -87,25 +87,25 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(va
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration {
+func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration {
+func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -114,7 +114,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -124,11 +124,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePerio
// overwriting an existing map entries in Labels field with the same key.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -139,11 +139,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map
// overwriting an existing map entries in Annotations field with the same key.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -151,13 +151,13 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entrie
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration {
+func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -168,14 +168,14 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(va
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PersistentVolumeClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -190,5 +190,5 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
index 074fa55d1..792e3b944 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use
// with apply.
type PersistentVolumeSpecApplyConfiguration struct {
- Capacity *v1.ResourceList `json:"capacity,omitempty"`
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
PersistentVolumeSourceApplyConfiguration `json:",inline"`
- AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
ClaimRef *ObjectReferenceApplyConfiguration `json:"claimRef,omitempty"`
- PersistentVolumeReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
+ PersistentVolumeReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
StorageClassName *string `json:"storageClassName,omitempty"`
MountOptions []string `json:"mountOptions,omitempty"`
- VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+ VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"`
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
}
@@ -46,7 +46,7 @@ func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration {
// WithCapacity sets the Capacity field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Capacity field is set to the value of the last call.
-func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeSpecApplyConfiguration {
+func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeSpecApplyConfiguration {
b.Capacity = &value
return b
}
@@ -55,7 +55,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceL
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GCEPersistentDisk field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.GCEPersistentDisk = value
+ b.PersistentVolumeSourceApplyConfiguration.GCEPersistentDisk = value
return b
}
@@ -63,7 +63,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GC
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AWSElasticBlockStore field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.AWSElasticBlockStore = value
+ b.PersistentVolumeSourceApplyConfiguration.AWSElasticBlockStore = value
return b
}
@@ -71,7 +71,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HostPath field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.HostPath = value
+ b.PersistentVolumeSourceApplyConfiguration.HostPath = value
return b
}
@@ -79,7 +79,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVol
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Glusterfs field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.Glusterfs = value
+ b.PersistentVolumeSourceApplyConfiguration.Glusterfs = value
return b
}
@@ -87,7 +87,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsP
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NFS field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.NFS = value
+ b.PersistentVolumeSourceApplyConfiguration.NFS = value
return b
}
@@ -95,7 +95,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RBD field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.RBD = value
+ b.PersistentVolumeSourceApplyConfiguration.RBD = value
return b
}
@@ -103,7 +103,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVol
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ISCSI field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.ISCSI = value
+ b.PersistentVolumeSourceApplyConfiguration.ISCSI = value
return b
}
@@ -111,7 +111,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersisten
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Cinder field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.Cinder = value
+ b.PersistentVolumeSourceApplyConfiguration.Cinder = value
return b
}
@@ -119,7 +119,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersist
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CephFS field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.CephFS = value
+ b.PersistentVolumeSourceApplyConfiguration.CephFS = value
return b
}
@@ -127,7 +127,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersist
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FC field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.FC = value
+ b.PersistentVolumeSourceApplyConfiguration.FC = value
return b
}
@@ -135,7 +135,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Flocker field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.Flocker = value
+ b.PersistentVolumeSourceApplyConfiguration.Flocker = value
return b
}
@@ -143,7 +143,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolum
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FlexVolume field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.FlexVolume = value
+ b.PersistentVolumeSourceApplyConfiguration.FlexVolume = value
return b
}
@@ -151,7 +151,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AzureFile field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFilePersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.AzureFile = value
+ b.PersistentVolumeSourceApplyConfiguration.AzureFile = value
return b
}
@@ -159,7 +159,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFileP
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VsphereVolume field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.VsphereVolume = value
+ b.PersistentVolumeSourceApplyConfiguration.VsphereVolume = value
return b
}
@@ -167,7 +167,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *Vspher
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Quobyte field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.Quobyte = value
+ b.PersistentVolumeSourceApplyConfiguration.Quobyte = value
return b
}
@@ -175,7 +175,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolum
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AzureDisk field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.AzureDisk = value
+ b.PersistentVolumeSourceApplyConfiguration.AzureDisk = value
return b
}
@@ -183,7 +183,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskV
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PhotonPersistentDisk field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.PhotonPersistentDisk = value
+ b.PersistentVolumeSourceApplyConfiguration.PhotonPersistentDisk = value
return b
}
@@ -191,7 +191,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PortworxVolume field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.PortworxVolume = value
+ b.PersistentVolumeSourceApplyConfiguration.PortworxVolume = value
return b
}
@@ -199,7 +199,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *Portw
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ScaleIO field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.ScaleIO = value
+ b.PersistentVolumeSourceApplyConfiguration.ScaleIO = value
return b
}
@@ -207,7 +207,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Local field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.Local = value
+ b.PersistentVolumeSourceApplyConfiguration.Local = value
return b
}
@@ -215,7 +215,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSou
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StorageOS field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.StorageOS = value
+ b.PersistentVolumeSourceApplyConfiguration.StorageOS = value
return b
}
@@ -223,14 +223,14 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSP
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CSI field is set to the value of the last call.
func (b *PersistentVolumeSpecApplyConfiguration) WithCSI(value *CSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration {
- b.CSI = value
+ b.PersistentVolumeSourceApplyConfiguration.CSI = value
return b
}
// WithAccessModes adds the given value to the AccessModes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the AccessModes field.
-func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration {
+func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration {
for i := range values {
b.AccessModes = append(b.AccessModes, values[i])
}
@@ -248,7 +248,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithClaimRef(value *ObjectRefer
// WithPersistentVolumeReclaimPolicy sets the PersistentVolumeReclaimPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PersistentVolumeReclaimPolicy field is set to the value of the last call.
-func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value v1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration {
+func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value corev1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration {
b.PersistentVolumeReclaimPolicy = &value
return b
}
@@ -274,7 +274,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithMountOptions(values ...stri
// WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VolumeMode field is set to the value of the last call.
-func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration {
+func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration {
b.VolumeMode = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
index 95ba90f48..0bb077ae0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use
// with apply.
type PersistentVolumeStatusApplyConfiguration struct {
- Phase *v1.PersistentVolumePhase `json:"phase,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *string `json:"reason,omitempty"`
- LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"`
+ Phase *corev1.PersistentVolumePhase `json:"phase,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"`
}
// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with
@@ -41,7 +41,7 @@ func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration {
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
-func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value v1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration {
+func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration {
b.Phase = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
index 507d57d6f..29526709f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodApplyConfiguration represents a declarative configuration of the Pod type for use
// with apply.
type PodApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *PodStatusApplyConfiguration `json:"status,omitempty"`
}
// Pod constructs a declarative configuration of the Pod type for use with
@@ -58,18 +58,18 @@ func Pod(name, namespace string) *PodApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPod(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+func ExtractPod(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
return extractPod(pod, fieldManager, "")
}
// ExtractPodStatus is the same as ExtractPod except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPodStatus(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+func ExtractPodStatus(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
return extractPod(pod, fieldManager, "status")
}
-func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) {
+func extractPod(pod *corev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) {
b := &PodApplyConfiguration{}
err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*P
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfigurat
// If called multiple times, the Name field is set to the value of the last call.
func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration {
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfigur
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfigurati
// If called multiple times, the UID field is set to the value of the last call.
func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfi
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PodApplyConfiguration) WithGeneration(value int64) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PodApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodApplyConfiguration {
+func (b *PodApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApplyConfiguration {
+func (b *PodApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApp
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Pod
// overwriting an existing map entries in Labels field with the same key.
func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyC
// overwriting an existing map entries in Annotations field with the same key.
func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodA
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration {
+func (b *PodApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference
func (b *PodApplyConfiguration) WithFinalizers(values ...string) *PodApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PodApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) *
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PodApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
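
These Pod builders are consumed through server-side apply. A sketch of typical usage against client-go v0.32.x; the clientset construction is standard, while the namespace, labels, image, and field-manager name are illustrative and not taken from this repository:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func applyPod(ctx context.Context, cfg *rest.Config) error {
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            return err
        }

        // Only the fields set here are owned by this field manager.
        pod := corev1ac.Pod("npd-example", "kube-system").
            WithLabels(map[string]string{"app": "npd-example"}).
            WithSpec(corev1ac.PodSpec().
                WithContainers(corev1ac.Container().
                    WithName("pause").
                    WithImage("registry.k8s.io/pause:3.9")))

        _, err = cs.CoreV1().Pods("kube-system").Apply(ctx, pod, metav1.ApplyOptions{
            FieldManager: "npd-example",
        })
        return err
    }
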
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
index 3afce026d..1cc1ca0d0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use
// with apply.
type PodAffinityTermApplyConfiguration struct {
- LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
- TopologyKey *string `json:"topologyKey,omitempty"`
- NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
- MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"`
+ LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
+ Namespaces []string `json:"namespaces,omitempty"`
+ TopologyKey *string `json:"topologyKey,omitempty"`
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
+ MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"`
}
// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with
@@ -42,7 +42,7 @@ func PodAffinityTerm() *PodAffinityTermApplyConfiguration {
// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LabelSelector field is set to the value of the last call.
-func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration {
+func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration {
b.LabelSelector = value
return b
}
@@ -68,7 +68,7 @@ func (b *PodAffinityTermApplyConfiguration) WithTopologyKey(value string) *PodAf
// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamespaceSelector field is set to the value of the last call.
-func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration {
+func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration {
b.NamespaceSelector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
index 98968d26d..67cd1bd09 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use
// with apply.
type PodConditionApplyConfiguration struct {
- Type *v1.PodConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *corev1.PodConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with
@@ -43,7 +43,7 @@ func PodCondition() *PodConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *PodConditionApplyConfiguration {
+func (b *PodConditionApplyConfiguration) WithType(value corev1.PodConditionType) *PodConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -51,7 +51,7 @@ func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *Po
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PodConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodConditionApplyConfiguration {
+func (b *PodConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
index 7f156f817..22a745601 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use
// with apply.
type PodOSApplyConfiguration struct {
- Name *v1.OSName `json:"name,omitempty"`
+ Name *corev1.OSName `json:"name,omitempty"`
}
// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with
@@ -37,7 +37,7 @@ func PodOS() *PodOSApplyConfiguration {
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
-func (b *PodOSApplyConfiguration) WithName(value v1.OSName) *PodOSApplyConfiguration {
+func (b *PodOSApplyConfiguration) WithName(value corev1.OSName) *PodOSApplyConfiguration {
b.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
index 09746df1b..4298b1ca6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use
// with apply.
type PodReadinessGateApplyConfiguration struct {
- ConditionType *v1.PodConditionType `json:"conditionType,omitempty"`
+ ConditionType *corev1.PodConditionType `json:"conditionType,omitempty"`
}
// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with
@@ -37,7 +37,7 @@ func PodReadinessGate() *PodReadinessGateApplyConfiguration {
// WithConditionType sets the ConditionType field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConditionType field is set to the value of the last call.
-func (b *PodReadinessGateApplyConfiguration) WithConditionType(value v1.PodConditionType) *PodReadinessGateApplyConfiguration {
+func (b *PodReadinessGateApplyConfiguration) WithConditionType(value corev1.PodConditionType) *PodReadinessGateApplyConfiguration {
b.ConditionType = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
index 55085e630..f0a3e662c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
@@ -37,6 +37,7 @@ type PodSecurityContextApplyConfiguration struct {
FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"`
SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"`
AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"`
+ SELinuxChangePolicy *corev1.PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty"`
}
// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with
@@ -147,3 +148,11 @@ func (b *PodSecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArm
b.AppArmorProfile = value
return b
}
+
+// WithSELinuxChangePolicy sets the SELinuxChangePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SELinuxChangePolicy field is set to the value of the last call.
+func (b *PodSecurityContextApplyConfiguration) WithSELinuxChangePolicy(value corev1.PodSELinuxChangePolicy) *PodSecurityContextApplyConfiguration {
+ b.SELinuxChangePolicy = &value
+ return b
+}
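
The WithSELinuxChangePolicy setter above is the caller-facing entry point for the new seLinuxChangePolicy field on PodSecurityContext that arrives with the k8s.io/api and k8s.io/client-go 0.32 bump. Below is a minimal, hedged sketch of how any client-go consumer could set it through server-side apply; the pod name, namespace, image, "MountOption" value, and field manager are illustrative assumptions, and the field is only honored by API servers with the corresponding feature enabled.

// Hedged usage sketch, not part of the vendored code.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyPodWithSELinuxChangePolicy(ctx context.Context, cs kubernetes.Interface) error {
	pod := corev1ac.Pod("example-pod", "default").
		WithSpec(corev1ac.PodSpec().
			WithContainers(corev1ac.Container().
				WithName("app").
				WithImage("registry.k8s.io/pause:3.9")).
			WithSecurityContext(corev1ac.PodSecurityContext().
				// "MountOption" is one of the documented policy values; a typed
				// constant may also exist in k8s.io/api/core/v1.
				WithSELinuxChangePolicy(corev1.PodSELinuxChangePolicy("MountOption"))))

	// Server-side apply; the field manager name is arbitrary.
	_, err := cs.CoreV1().Pods("default").Apply(ctx, pod, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
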
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
index 8134e044f..96f6eb94b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
@@ -64,6 +64,7 @@ type PodSpecApplyConfiguration struct {
HostUsers *bool `json:"hostUsers,omitempty"`
SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"`
ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"`
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
}
// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with
@@ -444,3 +445,11 @@ func (b *PodSpecApplyConfiguration) WithResourceClaims(values ...*PodResourceCla
}
return b
}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *PodSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PodSpecApplyConfiguration {
+ b.Resources = value
+ return b
+}
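
WithResources above exposes the new pod-level resources field on PodSpec. A short, hedged sketch of building such a spec with the generated builders follows; the container, image, and quantities are placeholders, and pod-level requests/limits only take effect on clusters where the corresponding (alpha) feature is enabled.

// Hedged sketch, not part of the vendored code.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func podSpecWithPodLevelResources() *corev1ac.PodSpecApplyConfiguration {
	return corev1ac.PodSpec().
		WithContainers(corev1ac.Container().
			WithName("app").
			WithImage("registry.k8s.io/pause:3.9")).
		// Pod-level requests/limits, distinct from the per-container resources.
		WithResources(corev1ac.ResourceRequirements().
			WithRequests(corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			}).
			WithLimits(corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			}))
}
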
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
index 0b68996cd..b79e1210a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use
// with apply.
type PodStatusApplyConfiguration struct {
- Phase *v1.PodPhase `json:"phase,omitempty"`
+ Phase *corev1.PodPhase `json:"phase,omitempty"`
Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"`
Message *string `json:"message,omitempty"`
Reason *string `json:"reason,omitempty"`
@@ -38,9 +38,9 @@ type PodStatusApplyConfiguration struct {
StartTime *metav1.Time `json:"startTime,omitempty"`
InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"`
ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"`
- QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"`
+ QOSClass *corev1.PodQOSClass `json:"qosClass,omitempty"`
EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"`
- Resize *v1.PodResizeStatus `json:"resize,omitempty"`
+ Resize *corev1.PodResizeStatus `json:"resize,omitempty"`
ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"`
}
@@ -53,7 +53,7 @@ func PodStatus() *PodStatusApplyConfiguration {
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
-func (b *PodStatusApplyConfiguration) WithPhase(value v1.PodPhase) *PodStatusApplyConfiguration {
+func (b *PodStatusApplyConfiguration) WithPhase(value corev1.PodPhase) *PodStatusApplyConfiguration {
b.Phase = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PodStatusApplyConfiguration) WithContainerStatuses(values ...*Container
// WithQOSClass sets the QOSClass field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the QOSClass field is set to the value of the last call.
-func (b *PodStatusApplyConfiguration) WithQOSClass(value v1.PodQOSClass) *PodStatusApplyConfiguration {
+func (b *PodStatusApplyConfiguration) WithQOSClass(value corev1.PodQOSClass) *PodStatusApplyConfiguration {
b.QOSClass = &value
return b
}
@@ -195,7 +195,7 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...*
// WithResize sets the Resize field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resize field is set to the value of the last call.
-func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration {
+func (b *PodStatusApplyConfiguration) WithResize(value corev1.PodResizeStatus) *PodStatusApplyConfiguration {
b.Resize = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
index b4c8a658a..7886ea2d9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use
// with apply.
type PodTemplateApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// PodTemplate constructs a declarative configuration of the PodTemplate type for use with
@@ -57,18 +57,18 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
+func ExtractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
return extractPodTemplate(podTemplate, fieldManager, "")
}
// ExtractPodTemplateStatus is the same as ExtractPodTemplate except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPodTemplateStatus(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
+func ExtractPodTemplateStatus(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
return extractPodTemplate(podTemplate, fieldManager, "status")
}
-func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) {
+func extractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) {
b := &PodTemplateApplyConfiguration{}
err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplateApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplat
// If called multiple times, the Name field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTempl
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplate
// If called multiple times, the UID field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTe
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithGeneration(value int64) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateApplyConfiguration {
+func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateApplyConfiguration {
+func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *P
// overwriting an existing map entries in Annotations field with the same key.
func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration {
+func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *PodTemplateApplyConfiguration) WithFinalizers(values ...string) *PodTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PodTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -251,5 +251,5 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PodTemplateApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
index 6146c01c7..2e0904a24 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use
// with apply.
type PodTemplateSpecApplyConfiguration struct {
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
}
// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with
@@ -42,7 +42,7 @@ func PodTemplateSpec() *PodTemplateSpecApplyConfiguration {
// If called multiple times, the Name field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -51,7 +51,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateS
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -60,7 +60,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodT
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -69,7 +69,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemp
// If called multiple times, the UID field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -78,7 +78,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -87,25 +87,25 @@ func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *P
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithGeneration(value int64) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration {
+func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration {
+func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -114,7 +114,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -124,11 +124,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -139,11 +139,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string
// overwriting an existing map entries in Annotations field with the same key.
func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -151,13 +151,13 @@ func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration {
+func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -168,14 +168,14 @@ func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow
func (b *PodTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *PodTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PodTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -190,5 +190,5 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PodTemplateSpecApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
index 5e738cabd..eff8fc2ac 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use
// with apply.
type PortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ Error *string `json:"error,omitempty"`
}
// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with
@@ -47,7 +47,7 @@ func (b *PortStatusApplyConfiguration) WithPort(value int32) *PortStatusApplyCon
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *PortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *PortStatusApplyConfiguration {
+func (b *PortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *PortStatusApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
index 3be1c9650..d6c654689 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
@@ -40,7 +40,7 @@ func Probe() *ProbeApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Exec field is set to the value of the last call.
func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *ProbeApplyConfiguration {
- b.Exec = value
+ b.ProbeHandlerApplyConfiguration.Exec = value
return b
}
@@ -48,7 +48,7 @@ func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration)
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTPGet field is set to the value of the last call.
func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *ProbeApplyConfiguration {
- b.HTTPGet = value
+ b.ProbeHandlerApplyConfiguration.HTTPGet = value
return b
}
@@ -56,7 +56,7 @@ func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfigura
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the TCPSocket field is set to the value of the last call.
func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *ProbeApplyConfiguration {
- b.TCPSocket = value
+ b.ProbeHandlerApplyConfiguration.TCPSocket = value
return b
}
@@ -64,7 +64,7 @@ func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GRPC field is set to the value of the last call.
func (b *ProbeApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeApplyConfiguration {
- b.GRPC = value
+ b.ProbeHandlerApplyConfiguration.GRPC = value
return b
}
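
The hunks above only qualify the promoted setters through the embedded ProbeHandlerApplyConfiguration; nothing changes for callers, who keep chaining the same methods. A small, hedged sketch for illustration (path, port, and timings are arbitrary):

// Hedged sketch, not part of the vendored code.
package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func readinessProbe() *corev1ac.ProbeApplyConfiguration {
	return corev1ac.Probe().
		// WithHTTPGet delegates to the embedded ProbeHandlerApplyConfiguration.
		WithHTTPGet(corev1ac.HTTPGetAction().
			WithPath("/healthz").
			WithPort(intstr.FromInt32(8080))).
		WithInitialDelaySeconds(5).
		WithPeriodSeconds(10)
}
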
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
index b28f422dc..4ef551914 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use
// with apply.
type ReplicationControllerApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicationController constructs a declarative configuration of the ReplicationController type for use with
@@ -58,18 +58,18 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+func ExtractReplicationController(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
return extractReplicationController(replicationController, fieldManager, "")
}
// ExtractReplicationControllerStatus is the same as ExtractReplicationController except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractReplicationControllerStatus(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+func ExtractReplicationControllerStatus(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
return extractReplicationController(replicationController, fieldManager, "status")
}
-func extractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) {
+func extractReplicationController(replicationController *corev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) {
b := &ReplicationControllerApplyConfiguration{}
err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractReplicationController(replicationController *apicorev1.ReplicationCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *ReplicationControllerApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *Replic
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) *ReplicationControllerApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) *
// If called multiple times, the Name field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithName(value string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ReplicationControllerApplyConfiguration) WithName(value string) *Replic
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string)
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *R
// If called multiple times, the UID field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *Repl
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value stri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithGeneration(value int64) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration {
+func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration {
+func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value me
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds
// overwriting an existing map entries in Labels field with the same key.
func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string]string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string]
// overwriting an existing map entries in Annotations field with the same key.
func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[st
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration {
+func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...
func (b *ReplicationControllerApplyConfiguration) WithFinalizers(values ...string) *ReplicationControllerApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ReplicationControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ReplicationControllerApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
index 0d74c1db9..dfcecc053 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use
// with apply.
type ReplicationControllerConditionApplyConfiguration struct {
- Type *v1.ReplicationControllerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *corev1.ReplicationControllerConditionType `json:"type,omitempty"`
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with
@@ -42,7 +42,7 @@ func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration {
+func (b *ReplicationControllerConditionApplyConfiguration) WithType(value corev1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.Rep
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration {
+func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
index 5169cb4bc..0338780b3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use
// with apply.
type ResourceHealthApplyConfiguration struct {
- ResourceID *v1.ResourceID `json:"resourceID,omitempty"`
- Health *v1.ResourceHealthStatus `json:"health,omitempty"`
+ ResourceID *corev1.ResourceID `json:"resourceID,omitempty"`
+ Health *corev1.ResourceHealthStatus `json:"health,omitempty"`
}
// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with
@@ -38,7 +38,7 @@ func ResourceHealth() *ResourceHealthApplyConfiguration {
// WithResourceID sets the ResourceID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceID field is set to the value of the last call.
-func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration {
+func (b *ResourceHealthApplyConfiguration) WithResourceID(value corev1.ResourceID) *ResourceHealthApplyConfiguration {
b.ResourceID = &value
return b
}
@@ -46,7 +46,7 @@ func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *
// WithHealth sets the Health field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Health field is set to the value of the last call.
-func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration {
+func (b *ResourceHealthApplyConfiguration) WithHealth(value corev1.ResourceHealthStatus) *ResourceHealthApplyConfiguration {
b.Health = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
index 2b78ba703..cd67f104c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use
// with apply.
type ResourceQuotaApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"`
}
// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with
@@ -58,18 +58,18 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
+func ExtractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
return extractResourceQuota(resourceQuota, fieldManager, "")
}
// ExtractResourceQuotaStatus is the same as ExtractResourceQuota except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractResourceQuotaStatus(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
+func ExtractResourceQuotaStatus(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
return extractResourceQuota(resourceQuota, fieldManager, "status")
}
-func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) {
+func extractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) {
b := &ResourceQuotaApplyConfiguration{}
err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *ResourceQuotaApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *Resource
// If called multiple times, the Name field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *Resour
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQ
// If called multiple times, the UID field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuot
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *Res
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithGeneration(value int64) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration {
+func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration {
+func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration {
+func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *ResourceQuotaApplyConfiguration) WithFinalizers(values ...string) *ResourceQuotaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ResourceQuotaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ResourceQuotaApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
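Note on the hunks above: the changes appear to be mechanical output of client-go's apply-configuration generator. Import aliases are normalized (the core API package becomes corev1, the apimachinery meta package becomes apismetav1, the apply meta package becomes metav1), and the generated setters now spell out the embedded TypeMetaApplyConfiguration / ObjectMetaApplyConfiguration instead of relying on Go's embedded-field promotion. Callers are unaffected; the chained With* API is unchanged. A minimal sketch of how such a builder is consumed with server-side apply, assuming a configured clientset and a field manager named "example-manager" (neither appears in this diff; applyQuota is a hypothetical helper):

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyQuota(ctx context.Context, clientset kubernetes.Interface) error {
	// Build the declarative configuration by chaining With* calls; the
	// generated setters now write through the embedded structs explicitly,
	// but call sites look exactly the same as before.
	quota := corev1ac.ResourceQuota("compute-quota", "default").
		WithLabels(map[string]string{"team": "platform"}).
		WithSpec(corev1ac.ResourceQuotaSpec().
			WithHard(corev1.ResourceList{
				corev1.ResourcePods: resource.MustParse("10"),
			}))

	// Server-side apply: the named field manager owns the fields set above.
	_, err := clientset.CoreV1().ResourceQuotas("default").
		Apply(ctx, quota, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}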
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
index 0012ace25..36d342fcd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use
// with apply.
type ResourceQuotaSpecApplyConfiguration struct {
- Hard *v1.ResourceList `json:"hard,omitempty"`
- Scopes []v1.ResourceQuotaScope `json:"scopes,omitempty"`
+ Hard *corev1.ResourceList `json:"hard,omitempty"`
+ Scopes []corev1.ResourceQuotaScope `json:"scopes,omitempty"`
ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"`
}
@@ -39,7 +39,7 @@ func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration {
// WithHard sets the Hard field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Hard field is set to the value of the last call.
-func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaSpecApplyConfiguration {
+func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaSpecApplyConfiguration {
b.Hard = &value
return b
}
@@ -47,7 +47,7 @@ func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *R
// WithScopes adds the given value to the Scopes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Scopes field.
-func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...v1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration {
+func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...corev1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration {
for i := range values {
b.Scopes = append(b.Scopes, values[i])
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
index 364b96eec..6338a1308 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use
// with apply.
type ResourceQuotaStatusApplyConfiguration struct {
- Hard *v1.ResourceList `json:"hard,omitempty"`
- Used *v1.ResourceList `json:"used,omitempty"`
+ Hard *corev1.ResourceList `json:"hard,omitempty"`
+ Used *corev1.ResourceList `json:"used,omitempty"`
}
// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with
@@ -38,7 +38,7 @@ func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration {
// WithHard sets the Hard field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Hard field is set to the value of the last call.
-func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration {
+func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration {
b.Hard = &value
return b
}
@@ -46,7 +46,7 @@ func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList)
// WithUsed sets the Used field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Used field is set to the value of the last call.
-func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration {
+func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration {
b.Used = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
index 51197862c..ea77647a9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use
// with apply.
type ResourceRequirementsApplyConfiguration struct {
- Limits *v1.ResourceList `json:"limits,omitempty"`
- Requests *v1.ResourceList `json:"requests,omitempty"`
+ Limits *corev1.ResourceList `json:"limits,omitempty"`
+ Requests *corev1.ResourceList `json:"requests,omitempty"`
Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"`
}
@@ -39,7 +39,7 @@ func ResourceRequirements() *ResourceRequirementsApplyConfiguration {
// WithLimits sets the Limits field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Limits field is set to the value of the last call.
-func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *ResourceRequirementsApplyConfiguration {
+func (b *ResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration {
b.Limits = &value
return b
}
@@ -47,7 +47,7 @@ func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceLis
// WithRequests sets the Requests field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Requests field is set to the value of the last call.
-func (b *ResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *ResourceRequirementsApplyConfiguration {
+func (b *ResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration {
b.Requests = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
index 1e63c87f8..e99586659 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use
// with apply.
type ResourceStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
+ Name *corev1.ResourceName `json:"name,omitempty"`
Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"`
}
@@ -38,7 +38,7 @@ func ResourceStatus() *ResourceStatusApplyConfiguration {
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
-func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration {
+func (b *ResourceStatusApplyConfiguration) WithName(value corev1.ResourceName) *ResourceStatusApplyConfiguration {
b.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
index c6ec87827..c2481f490 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use
// with apply.
type ScopedResourceSelectorRequirementApplyConfiguration struct {
- ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"`
- Operator *v1.ScopeSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ ScopeName *corev1.ResourceQuotaScope `json:"scopeName,omitempty"`
+ Operator *corev1.ScopeSelectorOperator `json:"operator,omitempty"`
+ Values []string `json:"values,omitempty"`
}
// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with
@@ -39,7 +39,7 @@ func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApply
// WithScopeName sets the ScopeName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ScopeName field is set to the value of the last call.
-func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value v1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration {
+func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value corev1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration {
b.ScopeName = &value
return b
}
@@ -47,7 +47,7 @@ func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(valu
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
-func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value v1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration {
+func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value corev1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration {
b.Operator = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
index eb3077a05..754bfd1b3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use
// with apply.
type SeccompProfileApplyConfiguration struct {
- Type *v1.SeccompProfileType `json:"type,omitempty"`
- LocalhostProfile *string `json:"localhostProfile,omitempty"`
+ Type *corev1.SeccompProfileType `json:"type,omitempty"`
+ LocalhostProfile *string `json:"localhostProfile,omitempty"`
}
// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with
@@ -38,7 +38,7 @@ func SeccompProfile() *SeccompProfileApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *SeccompProfileApplyConfiguration) WithType(value v1.SeccompProfileType) *SeccompProfileApplyConfiguration {
+func (b *SeccompProfileApplyConfiguration) WithType(value corev1.SeccompProfileType) *SeccompProfileApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
index 1d850b00b..9c8532d20 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
@@ -20,22 +20,22 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// SecretApplyConfiguration represents a declarative configuration of the Secret type for use
// with apply.
type SecretApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Immutable *bool `json:"immutable,omitempty"`
- Data map[string][]byte `json:"data,omitempty"`
- StringData map[string]string `json:"stringData,omitempty"`
- Type *corev1.SecretType `json:"type,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Immutable *bool `json:"immutable,omitempty"`
+ Data map[string][]byte `json:"data,omitempty"`
+ StringData map[string]string `json:"stringData,omitempty"`
+ Type *corev1.SecretType `json:"type,omitempty"`
}
// Secret constructs a declarative configuration of the Secret type for use with
@@ -89,7 +89,7 @@ func extractSecret(secret *corev1.Secret, fieldManager string, subresource strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -97,7 +97,7 @@ func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfigurat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -106,7 +106,7 @@ func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConf
// If called multiple times, the Name field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -115,7 +115,7 @@ func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfigurat
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -124,7 +124,7 @@ func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyCo
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -133,7 +133,7 @@ func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfi
// If called multiple times, the UID field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -142,7 +142,7 @@ func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfigur
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -151,25 +151,25 @@ func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretAppl
// If called multiple times, the Generation field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithGeneration(value int64) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *SecretApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SecretApplyConfiguration {
+func (b *SecretApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SecretApplyConfiguration {
+func (b *SecretApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -178,7 +178,7 @@ func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Sec
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -188,11 +188,11 @@ func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *
// overwriting an existing map entries in Labels field with the same key.
func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -203,11 +203,11 @@ func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *Secret
// overwriting an existing map entries in Annotations field with the same key.
func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -215,13 +215,13 @@ func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *S
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration {
+func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -232,14 +232,14 @@ func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere
func (b *SecretApplyConfiguration) WithFinalizers(values ...string) *SecretApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *SecretApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -290,5 +290,5 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *SecretApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
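The Extract* helpers touched in these files are the read side of the same workflow: they return an apply configuration containing only the fields a given field manager currently owns on the live object. A short sketch, reusing the imports, clientset, and "example-manager" name assumed in the earlier sketch (the secret name is likewise an assumption):

// ownedSecretFields is a hypothetical helper; it fetches the live Secret and
// extracts the subset of fields owned by "example-manager".
func ownedSecretFields(ctx context.Context, clientset kubernetes.Interface) (*corev1ac.SecretApplyConfiguration, error) {
	live, err := clientset.CoreV1().Secrets("default").Get(ctx, "db-credentials", metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Fields applied by other field managers are omitted from the result.
	return corev1ac.ExtractSecret(live, "example-manager")
}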
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
index ba99b7f5f..d3cc9f6a6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
@@ -35,7 +35,7 @@ func SecretEnvSource() *SecretEnvSourceApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *SecretEnvSourceApplyConfiguration) WithName(value string) *SecretEnvSourceApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
index 2d490b810..f1cd8b2d3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
@@ -36,7 +36,7 @@ func SecretKeySelector() *SecretKeySelectorApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *SecretKeySelectorApplyConfiguration) WithName(value string) *SecretKeySelectorApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
index 65ce3c66d..99fa36ecc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
@@ -36,7 +36,7 @@ func SecretProjection() *SecretProjectionApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *SecretProjectionApplyConfiguration) WithName(value string) *SecretProjectionApplyConfiguration {
- b.Name = &value
+ b.LocalObjectReferenceApplyConfiguration.Name = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
index 2dac0589d..85f6b25a9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ServiceApplyConfiguration represents a declarative configuration of the Service type for use
// with apply.
type ServiceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ServiceStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ServiceStatusApplyConfiguration `json:"status,omitempty"`
}
// Service constructs a declarative configuration of the Service type for use with
@@ -58,18 +58,18 @@ func Service(name, namespace string) *ServiceApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractService(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
+func ExtractService(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
return extractService(service, fieldManager, "")
}
// ExtractServiceStatus is the same as ExtractService except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractServiceStatus(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
+func ExtractServiceStatus(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
return extractService(service, fieldManager, "status")
}
-func extractService(service *apicorev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) {
+func extractService(service *corev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) {
b := &ServiceApplyConfiguration{}
err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractService(service *apicorev1.Service, fieldManager string, subresource
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithGeneration(value int64) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ServiceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceApplyConfiguration {
+func (b *ServiceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceApplyConfiguration {
+func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Se
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *Servi
// overwriting an existing map entries in Annotations field with the same key.
func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) *
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration {
+func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *ServiceApplyConfiguration) WithFinalizers(values ...string) *ServiceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ServiceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ServiceApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
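The setter rewrites are behavior-preserving: the explicit spelling and the promoted spelling address the same embedded field. A tiny sketch, assuming the corev1ac alias from the sketches above:

svc := corev1ac.Service("web", "default")      // constructor populates ObjectMeta.Name
promoted := svc.Name                           // promoted access (old generated style)
explicit := svc.ObjectMetaApplyConfiguration.Name // explicit access (new generated style)
_ = promoted == explicit                       // same *string; both spellings are equivalent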
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
index 26d33deb9..0d80ded9e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
@@ -19,22 +19,22 @@ limitations under the License.
package v1
import (
- apicorev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1 "k8s.io/api/core/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use
// with apply.
type ServiceAccountApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"`
- ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
- AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"`
+ ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
+ AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
}
// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with
@@ -59,18 +59,18 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+func ExtractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
return extractServiceAccount(serviceAccount, fieldManager, "")
}
// ExtractServiceAccountStatus is the same as ExtractServiceAccount except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractServiceAccountStatus(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+func ExtractServiceAccountStatus(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
return extractServiceAccount(serviceAccount, fieldManager, "status")
}
-func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) {
+func extractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) {
b := &ServiceAccountApplyConfiguration{}
err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManage
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccountApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccoun
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *ServiceAccountApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *Service
// If called multiple times, the Name field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccoun
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *Servi
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceA
// If called multiple times, the UID field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAcco
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,25 +150,25 @@ func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *Se
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithGeneration(value int64) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration {
+func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration {
+func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Ti
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value
// overwriting an existing map entries in Labels field with the same key.
func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -214,13 +214,13 @@ func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]st
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration {
+func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,14 +231,14 @@ func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.Own
func (b *ServiceAccountApplyConfiguration) WithFinalizers(values ...string) *ServiceAccountApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ServiceAccountApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -279,5 +279,5 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ServiceAccountApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
index e889f2134..4d5774d8d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
@@ -27,7 +27,7 @@ import (
// with apply.
type ServicePortApplyConfiguration struct {
Name *string `json:"name,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
AppProtocol *string `json:"appProtocol,omitempty"`
Port *int32 `json:"port,omitempty"`
TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
@@ -51,7 +51,7 @@ func (b *ServicePortApplyConfiguration) WithName(value string) *ServicePortApply
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *ServicePortApplyConfiguration) WithProtocol(value v1.Protocol) *ServicePortApplyConfiguration {
+func (b *ServicePortApplyConfiguration) WithProtocol(value corev1.Protocol) *ServicePortApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
index a34fb0552..4b9e43051 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TaintApplyConfiguration represents a declarative configuration of the Taint type for use
// with apply.
type TaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *v1.TaintEffect `json:"effect,omitempty"`
- TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+ Effect *corev1.TaintEffect `json:"effect,omitempty"`
+ TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
}
// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with
@@ -57,7 +57,7 @@ func (b *TaintApplyConfiguration) WithValue(value string) *TaintApplyConfigurati
// WithEffect sets the Effect field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Effect field is set to the value of the last call.
-func (b *TaintApplyConfiguration) WithEffect(value v1.TaintEffect) *TaintApplyConfiguration {
+func (b *TaintApplyConfiguration) WithEffect(value corev1.TaintEffect) *TaintApplyConfiguration {
b.Effect = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
index 1bcc85b65..a0a0aac00 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use
// with apply.
type TolerationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *v1.TolerationOperator `json:"operator,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *v1.TaintEffect `json:"effect,omitempty"`
- TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
+ Key *string `json:"key,omitempty"`
+ Operator *corev1.TolerationOperator `json:"operator,omitempty"`
+ Value *string `json:"value,omitempty"`
+ Effect *corev1.TaintEffect `json:"effect,omitempty"`
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
}
// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with
@@ -49,7 +49,7 @@ func (b *TolerationApplyConfiguration) WithKey(value string) *TolerationApplyCon
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
-func (b *TolerationApplyConfiguration) WithOperator(value v1.TolerationOperator) *TolerationApplyConfiguration {
+func (b *TolerationApplyConfiguration) WithOperator(value corev1.TolerationOperator) *TolerationApplyConfiguration {
b.Operator = &value
return b
}
@@ -65,7 +65,7 @@ func (b *TolerationApplyConfiguration) WithValue(value string) *TolerationApplyC
// WithEffect sets the Effect field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Effect field is set to the value of the last call.
-func (b *TolerationApplyConfiguration) WithEffect(value v1.TaintEffect) *TolerationApplyConfiguration {
+func (b *TolerationApplyConfiguration) WithEffect(value corev1.TaintEffect) *TolerationApplyConfiguration {
b.Effect = &value
return b
}
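For context, a minimal usage sketch of the regenerated Toleration builder (illustrative only, not part of the vendored diff): the With* setters now take the corev1-qualified types, so callers pass the corev1 constants directly. The import aliases below are assumptions for the example.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Chain the generated setters; WithOperator and WithEffect accept the
	// corev1.TolerationOperator and corev1.TaintEffect types shown above.
	tol := corev1ac.Toleration().
		WithKey("node-role.kubernetes.io/control-plane").
		WithOperator(corev1.TolerationOpExists).
		WithEffect(corev1.TaintEffectNoSchedule)
	fmt.Println(*tol.Key, *tol.Operator, *tol.Effect)
}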
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
index b21d23351..ab814e8e0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
@@ -28,11 +28,11 @@ import (
type TopologySpreadConstraintApplyConfiguration struct {
MaxSkew *int32 `json:"maxSkew,omitempty"`
TopologyKey *string `json:"topologyKey,omitempty"`
- WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
+ WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
MinDomains *int32 `json:"minDomains,omitempty"`
- NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"`
- NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"`
+ NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"`
+ NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"`
MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
}
@@ -61,7 +61,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value strin
// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call.
-func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value v1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration {
+func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration {
b.WhenUnsatisfiable = &value
return b
}
@@ -85,7 +85,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32)
// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
-func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
b.NodeAffinityPolicy = &value
return b
}
@@ -93,7 +93,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(valu
// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
-func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
b.NodeTaintsPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
index 9a48f8349..e47cd031d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
@@ -43,7 +43,7 @@ func (b *VolumeApplyConfiguration) WithName(value string) *VolumeApplyConfigurat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HostPath field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.HostPath = value
+ b.VolumeSourceApplyConfiguration.HostPath = value
return b
}
@@ -51,7 +51,7 @@ func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the EmptyDir field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.EmptyDir = value
+ b.VolumeSourceApplyConfiguration.EmptyDir = value
return b
}
@@ -59,7 +59,7 @@ func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GCEPersistentDisk field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.GCEPersistentDisk = value
+ b.VolumeSourceApplyConfiguration.GCEPersistentDisk = value
return b
}
@@ -67,7 +67,7 @@ func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDis
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AWSElasticBlockStore field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.AWSElasticBlockStore = value
+ b.VolumeSourceApplyConfiguration.AWSElasticBlockStore = value
return b
}
@@ -75,7 +75,7 @@ func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GitRepo field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.GitRepo = value
+ b.VolumeSourceApplyConfiguration.GitRepo = value
return b
}
@@ -83,7 +83,7 @@ func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Secret field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Secret = value
+ b.VolumeSourceApplyConfiguration.Secret = value
return b
}
@@ -91,7 +91,7 @@ func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NFS field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.NFS = value
+ b.VolumeSourceApplyConfiguration.NFS = value
return b
}
@@ -99,7 +99,7 @@ func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfigurat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ISCSI field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.ISCSI = value
+ b.VolumeSourceApplyConfiguration.ISCSI = value
return b
}
@@ -107,7 +107,7 @@ func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfig
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Glusterfs field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Glusterfs = value
+ b.VolumeSourceApplyConfiguration.Glusterfs = value
return b
}
@@ -115,7 +115,7 @@ func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PersistentVolumeClaim field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVolumeClaimVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.PersistentVolumeClaim = value
+ b.VolumeSourceApplyConfiguration.PersistentVolumeClaim = value
return b
}
@@ -123,7 +123,7 @@ func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RBD field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.RBD = value
+ b.VolumeSourceApplyConfiguration.RBD = value
return b
}
@@ -131,7 +131,7 @@ func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfigurat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FlexVolume field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.FlexVolume = value
+ b.VolumeSourceApplyConfiguration.FlexVolume = value
return b
}
@@ -139,7 +139,7 @@ func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Cinder field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Cinder = value
+ b.VolumeSourceApplyConfiguration.Cinder = value
return b
}
@@ -147,7 +147,7 @@ func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CephFS field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.CephFS = value
+ b.VolumeSourceApplyConfiguration.CephFS = value
return b
}
@@ -155,7 +155,7 @@ func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Flocker field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Flocker = value
+ b.VolumeSourceApplyConfiguration.Flocker = value
return b
}
@@ -163,7 +163,7 @@ func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DownwardAPI field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.DownwardAPI = value
+ b.VolumeSourceApplyConfiguration.DownwardAPI = value
return b
}
@@ -171,7 +171,7 @@ func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourc
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FC field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.FC = value
+ b.VolumeSourceApplyConfiguration.FC = value
return b
}
@@ -179,7 +179,7 @@ func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AzureFile field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.AzureFile = value
+ b.VolumeSourceApplyConfiguration.AzureFile = value
return b
}
@@ -187,7 +187,7 @@ func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ConfigMap field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.ConfigMap = value
+ b.VolumeSourceApplyConfiguration.ConfigMap = value
return b
}
@@ -195,7 +195,7 @@ func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VsphereVolume field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.VsphereVolume = value
+ b.VolumeSourceApplyConfiguration.VsphereVolume = value
return b
}
@@ -203,7 +203,7 @@ func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Quobyte field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Quobyte = value
+ b.VolumeSourceApplyConfiguration.Quobyte = value
return b
}
@@ -211,7 +211,7 @@ func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AzureDisk field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.AzureDisk = value
+ b.VolumeSourceApplyConfiguration.AzureDisk = value
return b
}
@@ -219,7 +219,7 @@ func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PhotonPersistentDisk field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.PhotonPersistentDisk = value
+ b.VolumeSourceApplyConfiguration.PhotonPersistentDisk = value
return b
}
@@ -227,7 +227,7 @@ func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersist
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Projected field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Projected = value
+ b.VolumeSourceApplyConfiguration.Projected = value
return b
}
@@ -235,7 +235,7 @@ func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PortworxVolume field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.PortworxVolume = value
+ b.VolumeSourceApplyConfiguration.PortworxVolume = value
return b
}
@@ -243,7 +243,7 @@ func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourc
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ScaleIO field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.ScaleIO = value
+ b.VolumeSourceApplyConfiguration.ScaleIO = value
return b
}
@@ -251,7 +251,7 @@ func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StorageOS field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.StorageOS = value
+ b.VolumeSourceApplyConfiguration.StorageOS = value
return b
}
@@ -259,7 +259,7 @@ func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CSI field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.CSI = value
+ b.VolumeSourceApplyConfiguration.CSI = value
return b
}
@@ -267,7 +267,7 @@ func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfigurat
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Ephemeral field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Ephemeral = value
+ b.VolumeSourceApplyConfiguration.Ephemeral = value
return b
}
@@ -275,6 +275,6 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Image field is set to the value of the last call.
func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration {
- b.Image = value
+ b.VolumeSourceApplyConfiguration.Image = value
return b
}
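Illustrative sketch (not part of the vendored diff) of the Volume builder whose setters now write through the embedded VolumeSourceApplyConfiguration explicitly; the "config" and "app-config" names are placeholders.

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// WithConfigMap stores the value on the embedded
	// VolumeSourceApplyConfiguration, matching the regenerated setters above.
	vol := corev1ac.Volume().
		WithName("config").
		WithConfigMap(corev1ac.ConfigMapVolumeSource().WithName("app-config"))
	fmt.Println(*vol.Name, *vol.ConfigMap.Name)
}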
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
index 49f22cc4e..ccd426a0c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use
// with apply.
type VolumeMountApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
- MountPath *string `json:"mountPath,omitempty"`
- SubPath *string `json:"subPath,omitempty"`
- MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"`
- SubPathExpr *string `json:"subPathExpr,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
+ MountPath *string `json:"mountPath,omitempty"`
+ SubPath *string `json:"subPath,omitempty"`
+ MountPropagation *corev1.MountPropagationMode `json:"mountPropagation,omitempty"`
+ SubPathExpr *string `json:"subPathExpr,omitempty"`
}
// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with
@@ -59,7 +59,7 @@ func (b *VolumeMountApplyConfiguration) WithReadOnly(value bool) *VolumeMountApp
// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RecursiveReadOnly field is set to the value of the last call.
-func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration {
+func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration {
b.RecursiveReadOnly = &value
return b
}
@@ -83,7 +83,7 @@ func (b *VolumeMountApplyConfiguration) WithSubPath(value string) *VolumeMountAp
// WithMountPropagation sets the MountPropagation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MountPropagation field is set to the value of the last call.
-func (b *VolumeMountApplyConfiguration) WithMountPropagation(value v1.MountPropagationMode) *VolumeMountApplyConfiguration {
+func (b *VolumeMountApplyConfiguration) WithMountPropagation(value corev1.MountPropagationMode) *VolumeMountApplyConfiguration {
b.MountPropagation = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
index a0a9b5401..f55c40723 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use
// with apply.
type VolumeMountStatusApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- MountPath *string `json:"mountPath,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
+ Name *string `json:"name,omitempty"`
+ MountPath *string `json:"mountPath,omitempty"`
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
}
// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with
@@ -64,7 +64,7 @@ func (b *VolumeMountStatusApplyConfiguration) WithReadOnly(value bool) *VolumeMo
// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the RecursiveReadOnly field is set to the value of the last call.
-func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration {
+func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration {
b.RecursiveReadOnly = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
index ae849f774..5c83ae6d4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use
// with apply.
type VolumeResourceRequirementsApplyConfiguration struct {
- Limits *v1.ResourceList `json:"limits,omitempty"`
- Requests *v1.ResourceList `json:"requests,omitempty"`
+ Limits *corev1.ResourceList `json:"limits,omitempty"`
+ Requests *corev1.ResourceList `json:"requests,omitempty"`
}
// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with
@@ -38,7 +38,7 @@ func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration
// WithLimits sets the Limits field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Limits field is set to the value of the last call.
-func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration {
+func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration {
b.Limits = &value
return b
}
@@ -46,7 +46,7 @@ func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.Resou
// WithRequests sets the Requests field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Requests field is set to the value of the last call.
-func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration {
+func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration {
b.Requests = &value
return b
}
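A small, assumed usage sketch of the regenerated VolumeResourceRequirements builder: WithRequests and WithLimits take a corev1.ResourceList value. The "10Gi" quantity is a placeholder.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Pass a corev1.ResourceList, matching the regenerated signature above.
	req := corev1ac.VolumeResourceRequirements().
		WithRequests(corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("10Gi"),
		})
	q := (*req.Requests)[corev1.ResourceStorage]
	fmt.Println(q.String())
}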
diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
index 12908deb6..b55c868cb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
@@ -19,16 +19,16 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
// with apply.
type EndpointPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- Port *int32 `json:"port,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+ AppProtocol *string `json:"appProtocol,omitempty"`
}
// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
@@ -48,7 +48,7 @@ func (b *EndpointPortApplyConfiguration) WithName(value string) *EndpointPortApp
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration {
+func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
index 97002d2bb..a27c0ab1a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
@@ -20,21 +20,21 @@ package v1
import (
discoveryv1 "k8s.io/api/discovery/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use
// with apply.
type EndpointSliceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- AddressType *discoveryv1.AddressType `json:"addressType,omitempty"`
- Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
- Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ AddressType *discoveryv1.AddressType `json:"addressType,omitempty"`
+ Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
+ Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
}
// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with
@@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint
// If called multiple times, the Name field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS
// If called multiple times, the UID field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,25 +150,25 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration {
+func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration {
+func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -214,13 +214,13 @@ func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration {
+func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,14 +231,14 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -279,5 +279,5 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EndpointSliceApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
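Illustrative sketch (not part of the vendored diff) of applying an EndpointSlice built from this apply configuration via server-side apply; the object name, namespace, labels, and field manager are placeholder assumptions.

package example

import (
	"context"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	discoveryv1ac "k8s.io/client-go/applyconfigurations/discovery/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func applyEndpointSlice(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Build the apply configuration with the generated With* helpers, then
	// server-side apply it through the typed discovery/v1 client.
	slice := discoveryv1ac.EndpointSlice("example-slice", "default").
		WithLabels(map[string]string{"kubernetes.io/service-name": "example"}).
		WithAddressType(discoveryv1.AddressTypeIPv4)
	_, err = cs.DiscoveryV1().EndpointSlices("default").
		Apply(ctx, slice, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}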
diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
index 888319bc0..46133ea32 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/discovery/v1beta1"
+ discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -32,7 +32,7 @@ import (
type EndpointSliceApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- AddressType *v1beta1.AddressType `json:"addressType,omitempty"`
+ AddressType *discoveryv1beta1.AddressType `json:"addressType,omitempty"`
Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
}
@@ -59,18 +59,18 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+func ExtractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
return extractEndpointSlice(endpointSlice, fieldManager, "")
}
// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractEndpointSliceStatus(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+func ExtractEndpointSliceStatus(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
return extractEndpointSlice(endpointSlice, fieldManager, "status")
}
-func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
+func extractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
b := &EndpointSliceApplyConfiguration{}
err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager str
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint
// If called multiple times, the Name field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS
// If called multiple times, the UID field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,7 +150,7 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -159,7 +159,7 @@ func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointS
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -168,7 +168,7 @@ func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -220,7 +220,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,7 +231,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -245,7 +245,7 @@ func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExis
// WithAddressType sets the AddressType field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the AddressType field is set to the value of the last call.
-func (b *EndpointSliceApplyConfiguration) WithAddressType(value v1beta1.AddressType) *EndpointSliceApplyConfiguration {
+func (b *EndpointSliceApplyConfiguration) WithAddressType(value discoveryv1beta1.AddressType) *EndpointSliceApplyConfiguration {
b.AddressType = &value
return b
}
@@ -279,5 +279,5 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EndpointSliceApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
index a6e98d1c8..64896c3d8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
@@ -19,34 +19,34 @@ limitations under the License.
package v1
import (
- apieventsv1 "k8s.io/api/events/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ eventsv1 "k8s.io/api/events/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// EventApplyConfiguration represents a declarative configuration of the Event type for use
// with apply.
type EventApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- EventTime *metav1.MicroTime `json:"eventTime,omitempty"`
- Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
- ReportingController *string `json:"reportingController,omitempty"`
- ReportingInstance *string `json:"reportingInstance,omitempty"`
- Action *string `json:"action,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
- Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
- Note *string `json:"note,omitempty"`
- Type *string `json:"type,omitempty"`
- DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
- DeprecatedFirstTimestamp *metav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
- DeprecatedLastTimestamp *metav1.Time `json:"deprecatedLastTimestamp,omitempty"`
- DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
+ Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
+ ReportingController *string `json:"reportingController,omitempty"`
+ ReportingInstance *string `json:"reportingInstance,omitempty"`
+ Action *string `json:"action,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
+ Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
+ Note *string `json:"note,omitempty"`
+ Type *string `json:"type,omitempty"`
+ DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
+ DeprecatedFirstTimestamp *apismetav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
+ DeprecatedLastTimestamp *apismetav1.Time `json:"deprecatedLastTimestamp,omitempty"`
+ DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
}
// Event constructs a declarative configuration of the Event type for use with
@@ -71,18 +71,18 @@ func Event(name, namespace string) *EventApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractEvent(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+func ExtractEvent(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
return extractEvent(event, fieldManager, "")
}
// ExtractEventStatus is the same as ExtractEvent except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractEventStatus(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+func ExtractEventStatus(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
return extractEvent(event, fieldManager, "status")
}
-func extractEvent(event *apieventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
+func extractEvent(event *eventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource)
if err != nil {
@@ -100,7 +100,7 @@ func extractEvent(event *apieventsv1.Event, fieldManager string, subresource str
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -162,25 +162,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E
// overwriting an existing map entries in Labels field with the same key.
func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -226,13 +226,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -243,21 +243,21 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
// WithEventTime sets the EventTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the EventTime field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration {
b.EventTime = &value
return b
}
@@ -345,7 +345,7 @@ func (b *EventApplyConfiguration) WithDeprecatedSource(value *corev1.EventSource
// WithDeprecatedFirstTimestamp sets the DeprecatedFirstTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeprecatedFirstTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.DeprecatedFirstTimestamp = &value
return b
}
@@ -353,7 +353,7 @@ func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time
// WithDeprecatedLastTimestamp sets the DeprecatedLastTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeprecatedLastTimestamp field is set to the value of the last call.
-func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value metav1.Time) *EventApplyConfiguration {
+func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value apismetav1.Time) *EventApplyConfiguration {
b.DeprecatedLastTimestamp = &value
return b
}
@@ -369,5 +369,5 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EventApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
index 18069c0d1..c90954bcc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
// with apply.
type EventSeriesApplyConfiguration struct {
- Count *int32 `json:"count,omitempty"`
- LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
+ Count *int32 `json:"count,omitempty"`
+ LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"`
}
// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with
@@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply
// WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastObservedTime field is set to the value of the last call.
-func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration {
+func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration {
b.LastObservedTime = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
index 890d95748..dc302e395 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
@@ -100,7 +100,7 @@ func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -162,7 +162,7 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -171,7 +171,7 @@ func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfigu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -180,7 +180,7 @@ func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Even
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E
// overwriting an existing map entries in Labels field with the same key.
func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -232,7 +232,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -243,7 +243,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -369,5 +369,5 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EventApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
index ff778529c..a75e38bfb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
@@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae
// overwriting an existing map entries in Annotations field with the same key.
func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DaemonSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
index 9b8057e69..0312a3099 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
@@ -20,18 +20,18 @@ package v1beta1
import (
v1 "k8s.io/api/core/v1"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
type DaemonSetConditionApplyConfiguration struct {
- Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *extensionsv1beta1.DaemonSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
@@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
+func (b *DaemonSetConditionApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
index e597b15a6..d3403605f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
- RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
+ Type *extensionsv1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with
@@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
+func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
index 6badc64d8..94fac18c6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
@@ -87,7 +87,7 @@ func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager st
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA
// If called multiple times, the Name field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De
// overwriting an existing map entries in Annotations field with the same key.
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DeploymentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
index 79e109a77..2b64508d9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
@@ -20,19 +20,19 @@ package v1beta1
import (
v1 "k8s.io/api/core/v1"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
type DeploymentConditionApplyConfiguration struct {
- Type *v1beta1.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *extensionsv1beta1.DeploymentConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
@@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
+func (b *DeploymentConditionApplyConfiguration) WithType(value extensionsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
index 2d88406eb..b142b0deb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
type DeploymentStrategyApplyConfiguration struct {
- Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ Type *extensionsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
@@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
+func (b *DeploymentStrategyApplyConfiguration) WithType(value extensionsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
index 3826e0ddd..32e0c8b1d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
type HTTPIngressPathApplyConfiguration struct {
Path *string `json:"path,omitempty"`
- PathType *v1beta1.PathType `json:"pathType,omitempty"`
+ PathType *extensionsv1beta1.PathType `json:"pathType,omitempty"`
Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
@@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP
// WithPathType sets the PathType field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PathType field is set to the value of the last call.
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration {
+func (b *HTTPIngressPathApplyConfiguration) WithPathType(value extensionsv1beta1.PathType) *HTTPIngressPathApplyConfiguration {
b.PathType = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
index 6738bf07b..8cc05cc62 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
@@ -87,7 +87,7 @@ func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, sub
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre
// overwriting an existing map entries in Annotations field with the same key.
func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IngressApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
index dc676f7b6..809fada92 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
@@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTP field is set to the value of the last call.
func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration {
- b.HTTP = value
+ b.IngressRuleValueApplyConfiguration.HTTP = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
index fb1f95a6d..5ce0eb31f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
@@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldM
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP
// If called multiple times, the Name field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo
// If called multiple times, the UID field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net
// If called multiple times, the Generation field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPo
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -251,5 +251,5 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *NetworkPolicyApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
index 24c6b6ad1..97a972f53 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
@@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager st
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA
// If called multiple times, the Name field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re
// overwriting an existing map entries in Annotations field with the same key.
func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ReplicaSetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
index 21a25ae81..540079fe5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
@@ -20,18 +20,18 @@ package v1beta1
import (
v1 "k8s.io/api/core/v1"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
type ReplicaSetConditionApplyConfiguration struct {
- Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *extensionsv1beta1.ReplicaSetConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
@@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
+func (b *ReplicaSetConditionApplyConfiguration) WithType(value extensionsv1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
index 101aa055b..53e73439e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
@@ -30,8 +30,8 @@ import (
type ScaleApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *v1beta1.ScaleSpec `json:"spec,omitempty"`
- Status *v1beta1.ScaleStatus `json:"status,omitempty"`
+ Spec *extensionsv1beta1.ScaleSpec `json:"spec,omitempty"`
+ Status *extensionsv1beta1.ScaleStatus `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig
// If called multiple times, the Name field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu
// If called multiple times, the UID field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S
// overwriting an existing map entries in Labels field with the same key.
func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp
// overwriting an existing map entries in Annotations field with the same key.
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen
func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithSpec(value extensionsv1beta1.ScaleSpec) *ScaleApplyConfiguration {
b.Spec = &value
return b
}
@@ -212,7 +212,7 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyC
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleApplyConfiguration {
+func (b *ScaleApplyConfiguration) WithStatus(value extensionsv1beta1.ScaleStatus) *ScaleApplyConfiguration {
b.Status = &value
return b
}
@@ -220,5 +220,5 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ScaleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
index 0f3b61af9..f8923ae7b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
type FlowDistinguisherMethodApplyConfiguration struct {
- Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"`
+ Type *flowcontrolv1.FlowDistinguisherMethodType `json:"type,omitempty"`
}
// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
@@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
+func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
index 9e3978af5..3219319ae 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiflowcontrolv1 "k8s.io/api/flowcontrol/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
// with apply.
type FlowSchemaApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
}
// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
@@ -57,18 +57,18 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+func ExtractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
return extractFlowSchema(flowSchema, fieldManager, "")
}
// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
return extractFlowSchema(flowSchema, fieldManager, "status")
}
-func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
+func extractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
b := &FlowSchemaApplyConfiguration{}
err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager str
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA
// If called multiple times, the Name field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc
// If called multiple times, the Generation field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
+func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
+func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl
// overwriting an existing map entries in Annotations field with the same key.
func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration {
+func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *FlowSchemaApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
index 5f26a66d2..d1c3dfbc6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
type FlowSchemaConditionApplyConfiguration struct {
- Type *v1.FlowSchemaConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1.FlowSchemaConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
@@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaCond
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
index 454ed8beb..dc2e919d7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
type LimitResponseApplyConfiguration struct {
- Type *v1.LimitResponseType `json:"type,omitempty"`
+ Type *flowcontrolv1.LimitResponseType `json:"type,omitempty"`
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
@@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration {
+func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1.LimitResponseType) *LimitResponseApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
index bcce2679c..50d5e5132 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apiflowcontrolv1 "k8s.io/api/flowcontrol/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
// with apply.
type PriorityLevelConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
}
// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
@@ -57,18 +57,18 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
}
// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
}
-func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
b := &PriorityLevelConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontro
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID)
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
+func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
+func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe
// overwriting an existing map entries in Labels field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st
// overwriting an existing map entries in Annotations field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries m
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration {
+func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
index 42ccbfbf9..a7810adfb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
@@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Status = &value
return b
}
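
The other recurring change in these files is the import alias: the flowcontrol group package is now imported as flowcontrolv1 (and flowcontrolv1beta1, flowcontrolv1beta2 below) instead of the bare v1, keeping it visually distinct from the metav1 alias for k8s.io/apimachinery/pkg/apis/meta/v1 used in the same files. A minimal sketch of the convention, mirroring the struct above (assumes the k8s.io/api and apimachinery modules are on the module path):

package v1

import (
	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// conditionLike mirrors the struct above: two packages versioned "v1" are
// used side by side, so each gets a descriptive alias.
type conditionLike struct {
	Type               *flowcontrolv1.PriorityLevelConfigurationConditionType
	Status             *flowcontrolv1.ConditionStatus
	LastTransitionTime *metav1.Time
	Reason             *string
}
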
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
index 2262dedca..45e4cdcd8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *v1.PriorityLevelEnablement `json:"type,omitempty"`
+ Type *flowcontrolv1.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
@@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
index 1ec77ae89..e2f6f3849 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
type SubjectApplyConfiguration struct {
- Kind *v1.SubjectKind `json:"kind,omitempty"`
+ Kind *flowcontrolv1.SubjectKind `json:"kind,omitempty"`
User *UserSubjectApplyConfiguration `json:"user,omitempty"`
Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
@@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration {
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration {
+func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1.SubjectKind) *SubjectApplyConfiguration {
b.Kind = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
index 29a8999b8..11aa62bba 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
type FlowDistinguisherMethodApplyConfiguration struct {
- Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"`
+ Type *flowcontrolv1beta1.FlowDistinguisherMethodType `json:"type,omitempty"`
}
// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
@@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
+func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
index 09bd25890..f5d69b8a5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
@@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA
// If called multiple times, the Name field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc
// If called multiple times, the Generation field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting existing map entries in the Labels field with the same key.
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl
// overwriting existing map entries in the Annotations field with the same key.
func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *FlowSchemaApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
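
Because only the internal field references and import aliases change, callers of the generated builders are unaffected. A minimal usage sketch, restricted to methods visible in the hunks above; the FlowSchema(name) constructor is assumed from the generated-code convention for cluster-scoped kinds and is not itself part of these hunks:

package main

import (
	"fmt"

	flowcontrolv1beta1ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
)

func main() {
	// FlowSchema(name) is the assumed constructor; the chained With* methods
	// and GetName are the ones shown in the hunks above.
	fs := flowcontrolv1beta1ac.FlowSchema("example").
		WithLabels(map[string]string{"team": "infra"}).
		WithFinalizers("example.io/protect")

	fmt.Println(*fs.GetName()) // "example"
}
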
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
index d1c3dbec6..e7dcb4366 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
type FlowSchemaConditionApplyConfiguration struct {
- Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"`
- Status *v1beta1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta1.FlowSchemaConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
@@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchem
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
index 66f327601..20e1b17bd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
type LimitResponseApplyConfiguration struct {
- Type *v1beta1.LimitResponseType `json:"type,omitempty"`
+ Type *flowcontrolv1beta1.LimitResponseType `json:"type,omitempty"`
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
@@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta1.LimitResponseType) *LimitResponseApplyConfiguration {
+func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta1.LimitResponseType) *LimitResponseApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
index c4243f874..54030159e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
@@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID)
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe
// overwriting existing map entries in the Labels field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st
// overwriting existing map entries in the Annotations field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
index 1ad4a554b..74eda9170 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *v1beta1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
@@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
index b013845f4..775f476dd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"`
+ Type *flowcontrolv1beta1.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
@@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
index b5c231f6d..000508065 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
)
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
type SubjectApplyConfiguration struct {
- Kind *v1beta1.SubjectKind `json:"kind,omitempty"`
+ Kind *flowcontrolv1beta1.SubjectKind `json:"kind,omitempty"`
User *UserSubjectApplyConfiguration `json:"user,omitempty"`
Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
@@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration {
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *SubjectApplyConfiguration) WithKind(value v1beta1.SubjectKind) *SubjectApplyConfiguration {
+func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta1.SubjectKind) *SubjectApplyConfiguration {
b.Kind = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
index e3c4b97a7..3922c4729 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
type FlowDistinguisherMethodApplyConfiguration struct {
- Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"`
+ Type *flowcontrolv1beta2.FlowDistinguisherMethodType `json:"type,omitempty"`
}
// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
@@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
+func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
index ffc3af950..fcab6df87 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
@@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA
// If called multiple times, the Name field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc
// If called multiple times, the Generation field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting existing map entries in the Labels field with the same key.
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl
// overwriting existing map entries in the Annotations field with the same key.
func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *FlowSchemaApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
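
The WithLabels/WithAnnotations bodies shown in these hunks allocate the map lazily and then copy entries key by key, so repeated calls merge rather than replace: only keys present in the new entries are overwritten. A self-contained toy sketch of that merge behavior (not client-go code):

package main

import "fmt"

// withLabels reproduces the merge logic of the generated WithLabels body:
// allocate the map lazily, then overwrite entry by entry.
func withLabels(labels, entries map[string]string) map[string]string {
	if labels == nil && len(entries) > 0 {
		labels = make(map[string]string, len(entries))
	}
	for k, v := range entries {
		labels[k] = v
	}
	return labels
}

func main() {
	l := withLabels(nil, map[string]string{"a": "1", "b": "2"})
	l = withLabels(l, map[string]string{"b": "3"}) // only "b" is overwritten
	fmt.Println(l)                                 // map[a:1 b:3]
}
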
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
index 44571d263..f47130eeb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
type FlowSchemaConditionApplyConfiguration struct {
- Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"`
- Status *v1beta2.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta2.FlowSchemaConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
@@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchem
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
index 38a513d30..58cd78006 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
type LimitResponseApplyConfiguration struct {
- Type *v1beta2.LimitResponseType `json:"type,omitempty"`
+ Type *flowcontrolv1beta2.LimitResponseType `json:"type,omitempty"`
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
@@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta2.LimitResponseType) *LimitResponseApplyConfiguration {
+func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta2.LimitResponseType) *LimitResponseApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
index 7d52ca2c2..116bcfd31 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
@@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID)
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe
// overwriting existing map entries in the Labels field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st
// overwriting existing map entries in the Annotations field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
index ddb17e984..caf517be3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *v1beta2.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
@@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
index c083ad0ba..c680ea1ef 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"`
+ Type *flowcontrolv1beta2.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
@@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
index 2cfaab43d..2b569a628 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
type SubjectApplyConfiguration struct {
- Kind *v1beta2.SubjectKind `json:"kind,omitempty"`
+ Kind *flowcontrolv1beta2.SubjectKind `json:"kind,omitempty"`
User *UserSubjectApplyConfiguration `json:"user,omitempty"`
Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
@@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration {
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *SubjectApplyConfiguration) WithKind(value v1beta2.SubjectKind) *SubjectApplyConfiguration {
+func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta2.SubjectKind) *SubjectApplyConfiguration {
b.Kind = &value
return b
}
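
The only change in these v1beta2 files is the import alias: the API package was previously bound as v1beta2, the same spelling as the local package, and is now bound as flowcontrolv1beta2. A small illustrative snippet (not part of the vendored code) of the new style:

```go
package v1beta2

import (
	// The regenerated code spells out the API group in the alias. The old
	// alias, `v1beta2`, read identically to this package's own name, so a
	// call site like v1beta2.SubjectKind was easy to misread as a local type.
	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
)

// Compile-time check that the aliased import resolves to the API type.
var _ flowcontrolv1beta2.SubjectKind = flowcontrolv1beta2.SubjectKind("User")
```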
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
index 49d84bd86..cc32fa100 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
type FlowDistinguisherMethodApplyConfiguration struct {
- Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"`
+ Type *flowcontrolv1beta3.FlowDistinguisherMethodType `json:"type,omitempty"`
}
// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with
@@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
+func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
index 1f69c43b2..5f6416c7c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
@@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA
// If called multiple times, the Name field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp
// If called multiple times, the UID field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc
// If called multiple times, the Generation field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6
// overwriting an existing map entries in Labels field with the same key.
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl
// overwriting an existing map entries in Annotations field with the same key.
func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe
func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *FlowSchemaApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
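
A FlowSchemaApplyConfiguration is built by chaining the With* methods shown above and is normally handed to server-side apply. A hedged sketch under the assumption of a standard client-go clientset; the flow-schema name, priority-level reference, and field manager are placeholders, and FlowSchemaSpec() / PriorityLevelConfigurationReference() are assumed to be the sibling generated constructors in the same package:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

// applyFlowSchema chains the generated With* methods and submits the result
// with server-side apply. Names and values below are illustrative only.
func applyFlowSchema(ctx context.Context, cs kubernetes.Interface) error {
	fs := flowcontrolv1beta3.FlowSchema("example-flow-schema").
		WithLabels(map[string]string{"app": "example"}).
		WithSpec(flowcontrolv1beta3.FlowSchemaSpec().
			WithPriorityLevelConfiguration(
				flowcontrolv1beta3.PriorityLevelConfigurationReference().
					WithName("global-default")).
			WithMatchingPrecedence(1000))

	_, err := cs.FlowcontrolV1beta3().FlowSchemas().
		Apply(ctx, fs, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```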
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
index 41d623aeb..d5ba21f71 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
type FlowSchemaConditionApplyConfiguration struct {
- Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"`
- Status *v1beta3.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta3.FlowSchemaConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
@@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchem
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
+func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
index 8deaabdeb..2c289c777 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
type LimitResponseApplyConfiguration struct {
- Type *v1beta3.LimitResponseType `json:"type,omitempty"`
+ Type *flowcontrolv1beta3.LimitResponseType `json:"type,omitempty"`
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
@@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta3.LimitResponseType) *LimitResponseApplyConfiguration {
+func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta3.LimitResponseType) *LimitResponseApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
index e7d1a3a5f..bb036c466 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
@@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID)
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe
// overwriting an existing map entries in Labels field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st
// overwriting an existing map entries in Annotations field with the same key.
func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value
func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
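
The WithLabels/WithAnnotations doc comments above state that entries are merged into the existing map, overwriting duplicate keys. A short sketch of that merge behaviour with the v1beta3 builder; the label keys and values are made up:

```go
package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
)

func main() {
	plc := flowcontrolv1beta3.PriorityLevelConfiguration("workload-low").
		WithLabels(map[string]string{"tier": "batch", "team": "infra"}).
		// The second call merges into the same map: "tier" is overwritten,
		// "team" is kept, "owner" is added.
		WithLabels(map[string]string{"tier": "best-effort", "owner": "npd"})

	fmt.Println(plc.Labels) // map[owner:npd team:infra tier:best-effort]
}
```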
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
index 8e9687bb9..01695f144 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *v1beta3.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *flowcontrolv1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
@@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Type = &value
return b
}
@@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration {
b.Status = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
index 9fa1112ce..c95085478 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"`
+ Type *flowcontrolv1beta3.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
@@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
index c412b2a7a..46499f541 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
type SubjectApplyConfiguration struct {
- Kind *v1beta3.SubjectKind `json:"kind,omitempty"`
+ Kind *flowcontrolv1beta3.SubjectKind `json:"kind,omitempty"`
User *UserSubjectApplyConfiguration `json:"user,omitempty"`
Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
@@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration {
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *SubjectApplyConfiguration) WithKind(value v1beta3.SubjectKind) *SubjectApplyConfiguration {
+func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta3.SubjectKind) *SubjectApplyConfiguration {
b.Kind = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
index 43c9ae05a..cd9fcd98b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
@@ -19,8 +19,8 @@ limitations under the License.
package internal
import (
- "fmt"
- "sync"
+ fmt "fmt"
+ sync "sync"
typed "sigs.k8s.io/structured-merge-diff/v4/typed"
)
@@ -512,6 +512,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: url
type:
scalar: string
+- name: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration
+ map:
+ fields:
+ - name: expression
+ type:
+ scalar: string
- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation
map:
fields:
@@ -534,6 +540,12 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.admissionregistration.v1alpha1.JSONPatch
+ map:
+ fields:
+ - name: expression
+ type:
+ scalar: string
- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition
map:
fields:
@@ -570,6 +582,100 @@ var schemaYAML = typed.YAMLObject(`types:
namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations
elementRelationship: atomic
elementRelationship: atomic
+- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec
+ default: {}
+- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec
+ default: {}
+- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec
+ map:
+ fields:
+ - name: matchResources
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources
+ - name: paramRef
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.ParamRef
+ - name: policyName
+ type:
+ scalar: string
+- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec
+ map:
+ fields:
+ - name: failurePolicy
+ type:
+ scalar: string
+ - name: matchConditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition
+ elementRelationship: associative
+ keys:
+ - name
+ - name: matchConstraints
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources
+ - name: mutations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.Mutation
+ elementRelationship: atomic
+ - name: paramKind
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.ParamKind
+ - name: reinvocationPolicy
+ type:
+ scalar: string
+ - name: variables
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.Variable
+ elementRelationship: atomic
+- name: io.k8s.api.admissionregistration.v1alpha1.Mutation
+ map:
+ fields:
+ - name: applyConfiguration
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration
+ - name: jsonPatch
+ type:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.JSONPatch
+ - name: patchType
+ type:
+ scalar: string
+ default: ""
- name: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations
map:
fields:
@@ -4365,7 +4471,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: strategy
type:
scalar: string
-- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate
+- name: io.k8s.api.coordination.v1alpha2.LeaseCandidate
map:
fields:
- name: apiVersion
@@ -4380,14 +4486,15 @@ var schemaYAML = typed.YAMLObject(`types:
default: {}
- name: spec
type:
- namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec
+ namedType: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec
default: {}
-- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec
+- name: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec
map:
fields:
- name: binaryVersion
type:
scalar: string
+ default: ""
- name: emulationVersion
type:
scalar: string
@@ -4398,15 +4505,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: pingTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
- - name: preferredStrategies
- type:
- list:
- elementType:
- scalar: string
- elementRelationship: atomic
- name: renewTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime
+ - name: strategy
+ type:
+ scalar: string
- name: io.k8s.api.coordination.v1beta1.Lease
map:
fields:
@@ -6920,6 +7024,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: runAsUser
type:
scalar: numeric
+ - name: seLinuxChangePolicy
+ type:
+ scalar: string
- name: seLinuxOptions
type:
namedType: io.k8s.api.core.v1.SELinuxOptions
@@ -7060,6 +7167,9 @@ var schemaYAML = typed.YAMLObject(`types:
elementRelationship: associative
keys:
- name
+ - name: resources
+ type:
+ namedType: io.k8s.api.core.v1.ResourceRequirements
- name: restartPolicy
type:
scalar: string
@@ -12244,12 +12354,38 @@ var schemaYAML = typed.YAMLObject(`types:
- name: namespace
type:
scalar: string
-- name: io.k8s.api.resource.v1alpha3.AllocationResult
+- name: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus
map:
fields:
- - name: controller
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: data
+ type:
+ namedType: __untyped_atomic_
+ - name: device
+ type:
+ scalar: string
+ default: ""
+ - name: driver
+ type:
+ scalar: string
+ default: ""
+ - name: networkData
+ type:
+ namedType: io.k8s.api.resource.v1alpha3.NetworkDeviceData
+ - name: pool
type:
scalar: string
+ default: ""
+- name: io.k8s.api.resource.v1alpha3.AllocationResult
+ map:
+ fields:
- name: devices
type:
namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult
@@ -12404,9 +12540,6 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.resource.v1alpha3.DeviceSelector
elementRelationship: atomic
- - name: suitableNodes
- type:
- namedType: io.k8s.api.core.v1.NodeSelector
- name: io.k8s.api.resource.v1alpha3.DeviceConstraint
map:
fields:
@@ -12425,7 +12558,6 @@ var schemaYAML = typed.YAMLObject(`types:
- name: adminAccess
type:
scalar: boolean
- default: false
- name: allocationMode
type:
scalar: string
@@ -12449,6 +12581,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult
map:
fields:
+ - name: adminAccess
+ type:
+ scalar: boolean
- name: device
type:
scalar: string
@@ -12471,60 +12606,31 @@ var schemaYAML = typed.YAMLObject(`types:
- name: cel
type:
namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector
-- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
- map:
- fields:
- - name: driver
- type:
- scalar: string
- default: ""
- - name: parameters
- type:
- namedType: __untyped_atomic_
-- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext
+- name: io.k8s.api.resource.v1alpha3.NetworkDeviceData
map:
fields:
- - name: apiVersion
+ - name: hardwareAddress
type:
scalar: string
- - name: kind
+ - name: interfaceName
type:
scalar: string
- - name: metadata
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
- default: {}
- - name: spec
- type:
- namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec
- default: {}
- - name: status
- type:
- namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus
- default: {}
-- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec
- map:
- fields:
- - name: potentialNodes
+ - name: ips
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- - name: selectedNode
- type:
- scalar: string
-- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus
+- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration
map:
fields:
- - name: resourceClaims
+ - name: driver
type:
- list:
- elementType:
- namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus
- elementRelationship: associative
- keys:
- - name
+ scalar: string
+ default: ""
+ - name: parameters
+ type:
+ namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha3.ResourceClaim
map:
fields:
@@ -12564,25 +12670,9 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
-- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus
- map:
- fields:
- - name: name
- type:
- scalar: string
- default: ""
- - name: unsuitableNodes
- type:
- list:
- elementType:
- scalar: string
- elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec
map:
fields:
- - name: controller
- type:
- scalar: string
- name: devices
type:
namedType: io.k8s.api.resource.v1alpha3.DeviceClaim
@@ -12593,9 +12683,16 @@ var schemaYAML = typed.YAMLObject(`types:
- name: allocation
type:
namedType: io.k8s.api.resource.v1alpha3.AllocationResult
- - name: deallocationRequested
+ - name: devices
type:
- scalar: boolean
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus
+ elementRelationship: associative
+ keys:
+ - driver
+ - device
+ - pool
- name: reservedFor
type:
list:
@@ -12690,49 +12787,488 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.resource.v1alpha3.ResourcePool
default: {}
-- name: io.k8s.api.scheduling.v1.PriorityClass
+- name: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus
map:
fields:
- - name: apiVersion
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: data
+ type:
+ namedType: __untyped_atomic_
+ - name: device
type:
scalar: string
- - name: description
+ default: ""
+ - name: driver
type:
scalar: string
- - name: globalDefault
+ default: ""
+ - name: networkData
type:
- scalar: boolean
- - name: kind
+ namedType: io.k8s.api.resource.v1beta1.NetworkDeviceData
+ - name: pool
type:
scalar: string
- - name: metadata
+ default: ""
+- name: io.k8s.api.resource.v1beta1.AllocationResult
+ map:
+ fields:
+ - name: devices
type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ namedType: io.k8s.api.resource.v1beta1.DeviceAllocationResult
default: {}
- - name: preemptionPolicy
- type:
- scalar: string
- - name: value
+ - name: nodeSelector
type:
- scalar: numeric
- default: 0
-- name: io.k8s.api.scheduling.v1alpha1.PriorityClass
+ namedType: io.k8s.api.core.v1.NodeSelector
+- name: io.k8s.api.resource.v1beta1.BasicDevice
map:
fields:
- - name: apiVersion
+ - name: attributes
type:
- scalar: string
- - name: description
+ map:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceAttribute
+ - name: capacity
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceCapacity
+- name: io.k8s.api.resource.v1beta1.CELDeviceSelector
+ map:
+ fields:
+ - name: expression
type:
scalar: string
- - name: globalDefault
+ default: ""
+- name: io.k8s.api.resource.v1beta1.Device
+ map:
+ fields:
+ - name: basic
type:
- scalar: boolean
- - name: kind
+ namedType: io.k8s.api.resource.v1beta1.BasicDevice
+ - name: name
type:
scalar: string
- - name: metadata
- type:
+ default: ""
+- name: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration
+ map:
+ fields:
+ - name: opaque
+ type:
+ namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration
+ - name: requests
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: source
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.resource.v1beta1.DeviceAllocationResult
+ map:
+ fields:
+ - name: config
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration
+ elementRelationship: atomic
+ - name: results
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceAttribute
+ map:
+ fields:
+ - name: bool
+ type:
+ scalar: boolean
+ - name: int
+ type:
+ scalar: numeric
+ - name: string
+ type:
+ scalar: string
+ - name: version
+ type:
+ scalar: string
+- name: io.k8s.api.resource.v1beta1.DeviceCapacity
+ map:
+ fields:
+ - name: value
+ type:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+- name: io.k8s.api.resource.v1beta1.DeviceClaim
+ map:
+ fields:
+ - name: config
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration
+ elementRelationship: atomic
+ - name: constraints
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceConstraint
+ elementRelationship: atomic
+ - name: requests
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceRequest
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration
+ map:
+ fields:
+ - name: opaque
+ type:
+ namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration
+ - name: requests
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceClass
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.resource.v1beta1.DeviceClassSpec
+ default: {}
+- name: io.k8s.api.resource.v1beta1.DeviceClassConfiguration
+ map:
+ fields:
+ - name: opaque
+ type:
+ namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration
+- name: io.k8s.api.resource.v1beta1.DeviceClassSpec
+ map:
+ fields:
+ - name: config
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceClassConfiguration
+ elementRelationship: atomic
+ - name: selectors
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceSelector
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceConstraint
+ map:
+ fields:
+ - name: matchAttribute
+ type:
+ scalar: string
+ - name: requests
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceRequest
+ map:
+ fields:
+ - name: adminAccess
+ type:
+ scalar: boolean
+ - name: allocationMode
+ type:
+ scalar: string
+ - name: count
+ type:
+ scalar: numeric
+ - name: deviceClassName
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: selectors
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.DeviceSelector
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult
+ map:
+ fields:
+ - name: adminAccess
+ type:
+ scalar: boolean
+ - name: device
+ type:
+ scalar: string
+ default: ""
+ - name: driver
+ type:
+ scalar: string
+ default: ""
+ - name: pool
+ type:
+ scalar: string
+ default: ""
+ - name: request
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.resource.v1beta1.DeviceSelector
+ map:
+ fields:
+ - name: cel
+ type:
+ namedType: io.k8s.api.resource.v1beta1.CELDeviceSelector
+- name: io.k8s.api.resource.v1beta1.NetworkDeviceData
+ map:
+ fields:
+ - name: hardwareAddress
+ type:
+ scalar: string
+ - name: interfaceName
+ type:
+ scalar: string
+ - name: ips
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration
+ map:
+ fields:
+ - name: driver
+ type:
+ scalar: string
+ default: ""
+ - name: parameters
+ type:
+ namedType: __untyped_atomic_
+- name: io.k8s.api.resource.v1beta1.ResourceClaim
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec
+ default: {}
+ - name: status
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourceClaimStatus
+ default: {}
+- name: io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference
+ map:
+ fields:
+ - name: apiGroup
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: resource
+ type:
+ scalar: string
+ default: ""
+ - name: uid
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.resource.v1beta1.ResourceClaimSpec
+ map:
+ fields:
+ - name: devices
+ type:
+ namedType: io.k8s.api.resource.v1beta1.DeviceClaim
+ default: {}
+- name: io.k8s.api.resource.v1beta1.ResourceClaimStatus
+ map:
+ fields:
+ - name: allocation
+ type:
+ namedType: io.k8s.api.resource.v1beta1.AllocationResult
+ - name: devices
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus
+ elementRelationship: associative
+ keys:
+ - driver
+ - device
+ - pool
+ - name: reservedFor
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference
+ elementRelationship: associative
+ keys:
+ - uid
+- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplate
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec
+ default: {}
+- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec
+ map:
+ fields:
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec
+ default: {}
+- name: io.k8s.api.resource.v1beta1.ResourcePool
+ map:
+ fields:
+ - name: generation
+ type:
+ scalar: numeric
+ default: 0
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: resourceSliceCount
+ type:
+ scalar: numeric
+ default: 0
+- name: io.k8s.api.resource.v1beta1.ResourceSlice
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourceSliceSpec
+ default: {}
+- name: io.k8s.api.resource.v1beta1.ResourceSliceSpec
+ map:
+ fields:
+ - name: allNodes
+ type:
+ scalar: boolean
+ - name: devices
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.resource.v1beta1.Device
+ elementRelationship: atomic
+ - name: driver
+ type:
+ scalar: string
+ default: ""
+ - name: nodeName
+ type:
+ scalar: string
+ - name: nodeSelector
+ type:
+ namedType: io.k8s.api.core.v1.NodeSelector
+ - name: pool
+ type:
+ namedType: io.k8s.api.resource.v1beta1.ResourcePool
+ default: {}
+- name: io.k8s.api.scheduling.v1.PriorityClass
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: description
+ type:
+ scalar: string
+ - name: globalDefault
+ type:
+ scalar: boolean
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: preemptionPolicy
+ type:
+ scalar: string
+ - name: value
+ type:
+ scalar: numeric
+ default: 0
+- name: io.k8s.api.scheduling.v1alpha1.PriorityClass
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: description
+ type:
+ scalar: string
+ - name: globalDefault
+ type:
+ scalar: boolean
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: preemptionPolicy
@@ -13539,6 +14075,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: gracePeriodSeconds
type:
scalar: numeric
+ - name: ignoreStoreReadErrorWithClusterBreakingPotential
+ type:
+ scalar: boolean
- name: kind
type:
scalar: string
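
internal.go embeds this schema as a typed.YAMLObject for sigs.k8s.io/structured-merge-diff; the new entries register the MutatingAdmissionPolicy, coordination v1alpha2, and resource v1beta1 kinds with the apply machinery. A sketch of how such a schema is parsed, assuming typed.NewParser, (*Parser).Type, and ParseableType.IsValid behave as in structured-merge-diff v4; the schema excerpt is a trimmed stand-in, not the full string above:

```go
package main

import (
	"fmt"

	typed "sigs.k8s.io/structured-merge-diff/v4/typed"
)

// A one-type excerpt in the same shape as client-go's schemaYAML.
var miniSchema = typed.YAMLObject(`types:
- name: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration
  map:
    fields:
    - name: expression
      type:
        scalar: string
`)

func main() {
	// Assumed API: NewParser compiles the YAML schema once, Type looks up a
	// parseable type by its schema name, much as the apply machinery does
	// for each ...ApplyConfiguration kind registered above.
	parser, err := typed.NewParser(miniSchema)
	if err != nil {
		panic(err)
	}
	pt := parser.Type("io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration")
	fmt.Println(pt.IsValid())
}
```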
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
index 466aaebb6..69063df65 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
@@ -19,18 +19,18 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use
// with apply.
type ConditionApplyConfiguration struct {
- Type *string `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Status *metav1.ConditionStatus `json:"status,omitempty"`
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with
@@ -50,7 +50,7 @@ func (b *ConditionApplyConfiguration) WithType(value string) *ConditionApplyConf
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *ConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ConditionApplyConfiguration {
+func (b *ConditionApplyConfiguration) WithStatus(value metav1.ConditionStatus) *ConditionApplyConfiguration {
b.Status = &value
return b
}
@@ -66,7 +66,7 @@ func (b *ConditionApplyConfiguration) WithObservedGeneration(value int64) *Condi
// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the LastTransitionTime field is set to the value of the last call.
-func (b *ConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *ConditionApplyConfiguration {
+func (b *ConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ConditionApplyConfiguration {
b.LastTransitionTime = &value
return b
}
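
Usage sketch (not part of this patch): the alias rename from v1 to metav1 in the file above is cosmetic; the exported builder methods still take the apimachinery meta/v1 types. A minimal, illustrative example of the regenerated Condition builder, assuming the package paths shown in this file:

    package main

    import (
        "fmt"

        apimetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
    )

    func main() {
        // WithStatus and WithLastTransitionTime still accept the apimachinery
        // meta/v1 types; only the import alias inside the generated file changed.
        cond := metav1ac.Condition().
            WithType("Ready").
            WithStatus(apimetav1.ConditionTrue).
            WithLastTransitionTime(apimetav1.Now()).
            WithReason("KernelHasNoDeadlock").
            WithMessage("kernel has no deadlock")

        fmt.Println(*cond.Type, *cond.Status)
    }
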
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
index 313bb9784..ab398ef56 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
@@ -25,12 +25,13 @@ import (
// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use
// with apply.
type DeleteOptionsApplyConfiguration struct {
- TypeMetaApplyConfiguration `json:",inline"`
- GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
- Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"`
- OrphanDependents *bool `json:"orphanDependents,omitempty"`
- PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"`
- DryRun []string `json:"dryRun,omitempty"`
+ TypeMetaApplyConfiguration `json:",inline"`
+ GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
+ Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"`
+ OrphanDependents *bool `json:"orphanDependents,omitempty"`
+ PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"`
+ DryRun []string `json:"dryRun,omitempty"`
+ IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty"`
}
// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with
@@ -46,7 +47,7 @@ func DeleteOptions() *DeleteOptionsApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -54,7 +55,7 @@ func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeleteOptionsApplyConfiguration) WithAPIVersion(value string) *DeleteOptionsApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -99,3 +100,11 @@ func (b *DeleteOptionsApplyConfiguration) WithDryRun(values ...string) *DeleteOp
}
return b
}
+
+// WithIgnoreStoreReadErrorWithClusterBreakingPotential sets the IgnoreStoreReadErrorWithClusterBreakingPotential field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IgnoreStoreReadErrorWithClusterBreakingPotential field is set to the value of the last call.
+func (b *DeleteOptionsApplyConfiguration) WithIgnoreStoreReadErrorWithClusterBreakingPotential(value bool) *DeleteOptionsApplyConfiguration {
+ b.IgnoreStoreReadErrorWithClusterBreakingPotential = &value
+ return b
+}
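
Usage sketch (not part of this patch): the hunks above add the IgnoreStoreReadErrorWithClusterBreakingPotential field and its setter to the DeleteOptions apply configuration, matching the schema entry added earlier in internal/internal.go. A minimal, illustrative example of how the new setter chains with the existing ones; the values are placeholders:

    package main

    import (
        "fmt"

        apimetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
    )

    func main() {
        // The new setter follows the same "With" pattern as the existing fields;
        // the flag is an opt-in escape hatch for unsafe deletes and defaults to unset.
        opts := metav1ac.DeleteOptions().
            WithGracePeriodSeconds(30).
            WithPropagationPolicy(apimetav1.DeletePropagationForeground).
            WithIgnoreStoreReadErrorWithClusterBreakingPotential(false)

        fmt.Println(*opts.GracePeriodSeconds, *opts.IgnoreStoreReadErrorWithClusterBreakingPotential)
    }
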
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
index bd9db9659..c8b015c98 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use
// with apply.
type LabelSelectorRequirementApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *v1.LabelSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ Key *string `json:"key,omitempty"`
+ Operator *metav1.LabelSelectorOperator `json:"operator,omitempty"`
+ Values []string `json:"values,omitempty"`
}
// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with
@@ -47,7 +47,7 @@ func (b *LabelSelectorRequirementApplyConfiguration) WithKey(value string) *Labe
// WithOperator sets the Operator field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operator field is set to the value of the last call.
-func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value v1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration {
+func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value metav1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration {
b.Operator = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
index 6913df822..7175537c3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use
// with apply.
type ManagedFieldsEntryApplyConfiguration struct {
- Manager *string `json:"manager,omitempty"`
- Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"`
- APIVersion *string `json:"apiVersion,omitempty"`
- Time *v1.Time `json:"time,omitempty"`
- FieldsType *string `json:"fieldsType,omitempty"`
- FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"`
- Subresource *string `json:"subresource,omitempty"`
+ Manager *string `json:"manager,omitempty"`
+ Operation *metav1.ManagedFieldsOperationType `json:"operation,omitempty"`
+ APIVersion *string `json:"apiVersion,omitempty"`
+ Time *metav1.Time `json:"time,omitempty"`
+ FieldsType *string `json:"fieldsType,omitempty"`
+ FieldsV1 *metav1.FieldsV1 `json:"fieldsV1,omitempty"`
+ Subresource *string `json:"subresource,omitempty"`
}
// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with
@@ -51,7 +51,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithManager(value string) *Manage
// WithOperation sets the Operation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Operation field is set to the value of the last call.
-func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value v1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration {
+func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value metav1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration {
b.Operation = &value
return b
}
@@ -67,7 +67,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithAPIVersion(value string) *Man
// WithTime sets the Time field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Time field is set to the value of the last call.
-func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value v1.Time) *ManagedFieldsEntryApplyConfiguration {
+func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value metav1.Time) *ManagedFieldsEntryApplyConfiguration {
b.Time = &value
return b
}
@@ -83,7 +83,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsType(value string) *Man
// WithFieldsV1 sets the FieldsV1 field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FieldsV1 field is set to the value of the last call.
-func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value v1.FieldsV1) *ManagedFieldsEntryApplyConfiguration {
+func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value metav1.FieldsV1) *ManagedFieldsEntryApplyConfiguration {
b.FieldsV1 = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
index a9419975e..9b98d2209 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
)
@@ -32,8 +32,8 @@ type ObjectMetaApplyConfiguration struct {
UID *types.UID `json:"uid,omitempty"`
ResourceVersion *string `json:"resourceVersion,omitempty"`
Generation *int64 `json:"generation,omitempty"`
- CreationTimestamp *v1.Time `json:"creationTimestamp,omitempty"`
- DeletionTimestamp *v1.Time `json:"deletionTimestamp,omitempty"`
+ CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
+ DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty"`
DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
@@ -98,7 +98,7 @@ func (b *ObjectMetaApplyConfiguration) WithGeneration(value int64) *ObjectMetaAp
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *ObjectMetaApplyConfiguration {
+func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration {
b.CreationTimestamp = &value
return b
}
@@ -106,7 +106,7 @@ func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *Obj
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value v1.Time) *ObjectMetaApplyConfiguration {
+func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration {
b.DeletionTimestamp = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
index e39670f29..96f9b1f56 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/networking/v1"
+ networkingv1 "k8s.io/api/networking/v1"
)
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
type HTTPIngressPathApplyConfiguration struct {
Path *string `json:"path,omitempty"`
- PathType *v1.PathType `json:"pathType,omitempty"`
+ PathType *networkingv1.PathType `json:"pathType,omitempty"`
Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
@@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP
// WithPathType sets the PathType field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PathType field is set to the value of the last call.
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1.PathType) *HTTPIngressPathApplyConfiguration {
+func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1.PathType) *HTTPIngressPathApplyConfiguration {
b.PathType = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
index 607c26e94..9e275f24f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apinetworkingv1 "k8s.io/api/networking/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
// with apply.
type IngressApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
- Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
}
// Ingress constructs a declarative configuration of the Ingress type for use with
@@ -58,18 +58,18 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractIngress(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+func ExtractIngress(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
return extractIngress(ingress, fieldManager, "")
}
// ExtractIngressStatus is the same as ExtractIngress except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractIngressStatus(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+func ExtractIngressStatus(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
return extractIngress(ingress, fieldManager, "status")
}
-func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
+func extractIngress(ingress *networkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
b := &IngressApplyConfiguration{}
err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subre
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration {
+func (b *IngressApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration {
+func (b *IngressApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre
// overwriting an existing map entries in Annotations field with the same key.
func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration {
+func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IngressApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
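
Usage sketch (not part of this patch): the regenerated bodies above only qualify the embedded TypeMetaApplyConfiguration/ObjectMetaApplyConfiguration receivers explicitly; callers keep using the promoted builder methods unchanged. A short, illustrative example with placeholder names:

    package main

    import (
        "fmt"

        networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
    )

    func main() {
        // The exported "With" chain is the same as before the regeneration; only
        // the generated method bodies now spell out the embedded meta fields.
        ing := networkingv1ac.Ingress("demo", "default").
            WithLabels(map[string]string{"app": "demo"}).
            WithAnnotations(map[string]string{"owner": "npd"})

        // Field promotion through the embedded *ObjectMetaApplyConfiguration
        // still works for readers of the configuration.
        fmt.Println(*ing.GetName(), ing.Labels["app"])
    }
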
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
index 14acc7dbd..f723b5d70 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apinetworkingv1 "k8s.io/api/networking/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use
// with apply.
type IngressClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// IngressClass constructs a declarative configuration of the IngressClass type for use with
@@ -56,18 +56,18 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+func ExtractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
return extractIngressClass(ingressClass, fieldManager, "")
}
// ExtractIngressClassStatus is the same as ExtractIngressClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractIngressClassStatus(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+func ExtractIngressClassStatus(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
return extractIngressClass(ingressClass, fieldManager, "status")
}
-func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
+func extractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
b := &IngressClassApplyConfiguration{}
err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManage
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration {
+func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration {
+func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]stri
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration {
+func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *IngressClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -249,5 +249,5 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IngressClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
index b6411199f..84ba243ab 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
// with apply.
type IngressPortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ Error *string `json:"error,omitempty"`
}
// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
@@ -47,7 +47,7 @@ func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPort
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration {
+func (b *IngressPortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *IngressPortStatusApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
index 4ef871f07..20a1816bf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
@@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTP field is set to the value of the last call.
func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration {
- b.HTTP = value
+ b.IngressRuleValueApplyConfiguration.HTTP = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
index 3f8c8a535..e8da1be06 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apinetworkingv1 "k8s.io/api/networking/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use
// with apply.
type NetworkPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with
@@ -57,18 +57,18 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+func ExtractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
return extractNetworkPolicy(networkPolicy, fieldManager, "")
}
// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractNetworkPolicyStatus(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+func ExtractNetworkPolicyStatus(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
return extractNetworkPolicy(networkPolicy, fieldManager, "status")
}
-func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
+func extractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
b := &NetworkPolicyApplyConfiguration{}
err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldMan
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP
// If called multiple times, the Name field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo
// If called multiple times, the UID field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net
// If called multiple times, the Generation field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration {
+func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration {
+func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration {
+func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *NetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -251,5 +251,5 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *NetworkPolicyApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
index 046de3e23..716ceeeef 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use
// with apply.
type NetworkPolicyPeerApplyConfiguration struct {
- PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
- NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
+ PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
}
// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with
@@ -39,7 +39,7 @@ func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration {
// WithPodSelector sets the PodSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodSelector field is set to the value of the last call.
-func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration {
+func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration {
b.PodSelector = value
return b
}
@@ -47,7 +47,7 @@ func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSel
// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamespaceSelector field is set to the value of the last call.
-func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration {
+func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration {
b.NamespaceSelector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
index 581ef1c34..2ded0aecf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use
// with apply.
type NetworkPolicyPortApplyConfiguration struct {
- Protocol *v1.Protocol `json:"protocol,omitempty"`
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
Port *intstr.IntOrString `json:"port,omitempty"`
EndPort *int32 `json:"endPort,omitempty"`
}
@@ -40,7 +40,7 @@ func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration {
// WithProtocol sets the Protocol field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Protocol field is set to the value of the last call.
-func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value v1.Protocol) *NetworkPolicyPortApplyConfiguration {
+func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value corev1.Protocol) *NetworkPolicyPortApplyConfiguration {
b.Protocol = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
index da5ed5d35..48369b921 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1
import (
- apinetworkingv1 "k8s.io/api/networking/v1"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use
// with apply.
type NetworkPolicySpecApplyConfiguration struct {
- PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"`
Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"`
- PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"`
+ PolicyTypes []networkingv1.PolicyType `json:"policyTypes,omitempty"`
}
// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with
@@ -41,7 +41,7 @@ func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration {
// WithPodSelector sets the PodSelector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodSelector field is set to the value of the last call.
-func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration {
+func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration {
b.PodSelector = value
return b
}
@@ -75,7 +75,7 @@ func (b *NetworkPolicySpecApplyConfiguration) WithEgress(values ...*NetworkPolic
// WithPolicyTypes adds the given value to the PolicyTypes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the PolicyTypes field.
-func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...apinetworkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration {
+func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...networkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration {
for i := range values {
b.PolicyTypes = append(b.PolicyTypes, values[i])
}
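
A peer like the one above typically feeds into a NetworkPolicySpec via the builders in this file. The sketch below assumes the NetworkPolicyIngressRule() constructor and its WithFrom helper, which follow the same generated pattern but are not part of this hunk; PolicyTypeIngress comes from k8s.io/api/networking/v1 (imported here as networkingv1):

package main

import (
    "fmt"

    networkingv1 "k8s.io/api/networking/v1"
    metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
    networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
)

func main() {
    // Ingress-only spec: select app=frontend pods and allow traffic from namespaces labelled team=web.
    spec := networkingv1ac.NetworkPolicySpec().
        WithPodSelector(metav1ac.LabelSelector().
            WithMatchLabels(map[string]string{"app": "frontend"})).
        WithIngress(networkingv1ac.NetworkPolicyIngressRule().
            WithFrom(networkingv1ac.NetworkPolicyPeer().
                WithNamespaceSelector(metav1ac.LabelSelector().
                    WithMatchLabels(map[string]string{"team": "web"})))).
        WithPolicyTypes(networkingv1.PolicyTypeIngress)

    fmt.Println(spec.PolicyTypes)
}
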
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
index 999c23fa1..cc7880992 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go
@@ -84,7 +84,7 @@ func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddre
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressAppl
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPA
// overwriting an existing map entries in Annotations field with the same key.
func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IPAddressApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
index 984e049f2..27d3f271b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go
@@ -85,7 +85,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManage
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID
// If called multiple times, the Name field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR
// If called multiple times, the UID field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S
// overwriting an existing map entries in Annotations field with the same key.
func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ServiceCIDRApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
index 61b458f7e..c7301c6a3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/networking/v1beta1"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
)
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
type HTTPIngressPathApplyConfiguration struct {
Path *string `json:"path,omitempty"`
- PathType *v1beta1.PathType `json:"pathType,omitempty"`
+ PathType *networkingv1beta1.PathType `json:"pathType,omitempty"`
Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
@@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP
// WithPathType sets the PathType field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PathType field is set to the value of the last call.
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration {
+func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1beta1.PathType) *HTTPIngressPathApplyConfiguration {
b.PathType = &value
return b
}
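
As a usage sketch for the path builder above: the PathType value now comes from the API package aliased as networkingv1beta1. The HTTPIngressPath() constructor is assumed from the generated pattern (it is not shown in this hunk):

package main

import (
    "fmt"

    networkingv1beta1 "k8s.io/api/networking/v1beta1"
    networkingv1beta1ac "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func main() {
    // Prefix-matched path rule; the backend is left unset in this sketch.
    path := networkingv1beta1ac.HTTPIngressPath().
        WithPath("/healthz").
        WithPathType(networkingv1beta1.PathTypePrefix)

    fmt.Println(*path.Path, *path.PathType)
}
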
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
index 0df53ea65..5d26cd75c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
@@ -87,7 +87,7 @@ func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, sub
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre
// overwriting an existing map entries in Annotations field with the same key.
func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IngressApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
index b0e877b57..272e0339f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
@@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldMana
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressCla
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IngressClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
index dc676f7b6..809fada92 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
@@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the HTTP field is set to the value of the last call.
func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration {
- b.HTTP = value
+ b.IngressRuleValueApplyConfiguration.HTTP = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
index 3047d79b9..f58b54da5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
@@ -84,7 +84,7 @@ func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddre
// If called multiple times, the Generation field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressAppl
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPA
// overwriting an existing map entries in Annotations field with the same key.
func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *IPAddressApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
index 4ef8e9eca..6a53db5c0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
@@ -85,7 +85,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID
// If called multiple times, the Name field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR
// If called multiple times, the UID field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S
// overwriting an existing map entries in Annotations field with the same key.
func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ServiceCIDRApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
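
The metadata helpers above (WithLabels, WithFinalizers, GetName) chain the same way on any of these apply configurations. A minimal sketch, assuming the ServiceCIDR(name) constructor takes only a name because the resource is cluster-scoped (the constructor itself is not in this hunk); the label and finalizer values are placeholders:

package main

import (
    "fmt"

    networkingv1beta1ac "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func main() {
    cidr := networkingv1beta1ac.ServiceCIDR("extra-service-cidr").
        WithLabels(map[string]string{"managed-by": "example"}).
        WithFinalizers("example.com/demo-finalizer")

    // GetName reads through the embedded ObjectMetaApplyConfiguration, as shown above.
    fmt.Println(*cidr.GetName())
}
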
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
index 6694538fc..30ce9fb42 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
)
// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
// with apply.
type OverheadApplyConfiguration struct {
- PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
+ PodFixed *corev1.ResourceList `json:"podFixed,omitempty"`
}
// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with
@@ -37,7 +37,7 @@ func Overhead() *OverheadApplyConfiguration {
// WithPodFixed sets the PodFixed field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodFixed field is set to the value of the last call.
-func (b *OverheadApplyConfiguration) WithPodFixed(value v1.ResourceList) *OverheadApplyConfiguration {
+func (b *OverheadApplyConfiguration) WithPodFixed(value corev1.ResourceList) *OverheadApplyConfiguration {
b.PodFixed = &value
return b
}
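
WithPodFixed takes a corev1.ResourceList (a map from resource name to quantity), so a caller builds the overhead like this; resource.MustParse is from k8s.io/apimachinery/pkg/api/resource:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    nodev1ac "k8s.io/client-go/applyconfigurations/node/v1"
)

func main() {
    // Fixed per-pod overhead charged for this runtime class.
    overhead := nodev1ac.Overhead().WithPodFixed(corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse("250m"),
        corev1.ResourceMemory: resource.MustParse("120Mi"),
    })

    fmt.Println(*overhead.PodFixed)
}
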
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
index 6ce01a319..067dc1703 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
@@ -19,22 +19,22 @@ limitations under the License.
package v1
import (
- apinodev1 "k8s.io/api/node/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ nodev1 "k8s.io/api/node/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
// with apply.
type RuntimeClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Handler *string `json:"handler,omitempty"`
- Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
- Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Handler *string `json:"handler,omitempty"`
+ Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
+ Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
}
// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
@@ -58,18 +58,18 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+func ExtractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
return extractRuntimeClass(runtimeClass, fieldManager, "")
}
// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractRuntimeClassStatus(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+func ExtractRuntimeClassStatus(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
return extractRuntimeClass(runtimeClass, fieldManager, "status")
}
-func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
+func extractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
+func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
+func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]stri
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration {
+func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -267,5 +267,5 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RuntimeClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
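
The regenerated node/v1 apply configuration above only swaps import aliases and qualifies promoted embedded fields (for example `b.ObjectMetaApplyConfiguration.Name` instead of `b.Name`); because Go promotes embedded struct fields, the builder's behavior is unchanged. As a minimal, non-authoritative sketch of how this vendored builder is typically chained, using only the constructor and setters visible in the hunks above:

```go
package main

import (
	"fmt"

	nodev1 "k8s.io/client-go/applyconfigurations/node/v1"
)

func main() {
	// Chain the generated "With" setters; each one writes through the
	// embedded TypeMeta/ObjectMeta apply configurations shown in the diff.
	rc := nodev1.RuntimeClass("gvisor").
		WithKind("RuntimeClass").
		WithAPIVersion("node.k8s.io/v1").
		WithLabels(map[string]string{"sandbox": "true"})

	// GetName reads back through ObjectMetaApplyConfiguration.Name.
	fmt.Println(*rc.GetName()) // "gvisor"
}
```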
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
index 2d084e0f5..b45400fbc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/core/v1"
+ corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)
// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
// with apply.
type SchedulingApplyConfiguration struct {
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ Tolerations []corev1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
}
// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
@@ -52,7 +52,7 @@ func (b *SchedulingApplyConfiguration) WithNodeSelector(entries map[string]strin
// WithTolerations adds the given value to the Tolerations field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Tolerations field.
-func (b *SchedulingApplyConfiguration) WithTolerations(values ...*v1.TolerationApplyConfiguration) *SchedulingApplyConfiguration {
+func (b *SchedulingApplyConfiguration) WithTolerations(values ...*corev1.TolerationApplyConfiguration) *SchedulingApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithTolerations")
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
index 9f139ee1b..5ddca3b6e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
@@ -84,7 +84,7 @@ func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RuntimeClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
index fa6c9f45b..b17de6763 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
@@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager st
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -267,5 +267,5 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RuntimeClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
index 3a051619f..079c6f3bc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
@@ -20,19 +20,19 @@ package v1
import (
policyv1 "k8s.io/api/policy/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use
// with apply.
type EvictionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ DeleteOptions *metav1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
}
// Eviction constructs a declarative configuration of the Eviction type for use with
@@ -86,7 +86,7 @@ func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresour
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply
// If called multiple times, the Name field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC
// If called multiple times, the UID field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+func (b *EvictionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic
// overwriting an existing map entries in Annotations field with the same key.
func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration {
+func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,21 +229,21 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe
func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
// WithDeleteOptions sets the DeleteOptions field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeleteOptions field is set to the value of the last call.
-func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration {
+func (b *EvictionApplyConfiguration) WithDeleteOptions(value *metav1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration {
b.DeleteOptions = value
return b
}
@@ -251,5 +251,5 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EvictionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
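
The policy/v1 Eviction builder follows the same pattern, with a `DeleteOptions` field layered on top of the embedded metadata. A hedged sketch of chaining it is below; the `Eviction(name, namespace)` constructor and the `metav1.DeleteOptions()` helper from `applyconfigurations/meta/v1` are assumptions here, since only the `With*` setters appear in the hunks above:

```go
package evictionexample

import (
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
	policyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
)

// buildEviction sketches how the regenerated Eviction builder is chained.
// Eviction(name, namespace) and metav1.DeleteOptions() are assumed from the
// generated packages; WithDeleteOptions is the setter shown in the diff.
func buildEviction(pod, ns string) *policyv1.EvictionApplyConfiguration {
	return policyv1.Eviction(pod, ns).
		WithDeleteOptions(metav1.DeleteOptions().
			WithGracePeriodSeconds(30))
}
```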
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
index a765a7b62..82ec5a082 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apipolicyv1 "k8s.io/api/policy/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ policyv1 "k8s.io/api/policy/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use
// with apply.
type PodDisruptionBudgetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
}
// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with
@@ -58,18 +58,18 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "")
}
// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status")
}
-func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
+func extractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
b := &PodDisruptionBudgetApplyConfiguration{}
err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBu
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po
// If called multiple times, the Name field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod
// If called multiple times, the UID field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration {
+func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration {
+func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v
// overwriting an existing map entries in Labels field with the same key.
func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st
// overwriting an existing map entries in Annotations field with the same key.
func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[stri
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration {
+func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v
func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PodDisruptionBudgetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -260,5 +260,5 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
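
The `ExtractPodDisruptionBudget` helpers above exist for server-side apply round-tripping: pull out the fields a given field manager previously applied, adjust them, and re-apply. The following is a sketch under assumptions, not part of this diff: the typed `Get`/`Apply` calls and `metav1.ApplyOptions` come from client-go's generated clientset and apimachinery, while only `ExtractPodDisruptionBudget` and `WithLabels` are shown in the hunks above.

```go
package pdbexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policyv1apply "k8s.io/client-go/applyconfigurations/policy/v1"
	"k8s.io/client-go/kubernetes"
)

// reapplyPDB extracts the fields previously applied by fieldManager from the
// live PodDisruptionBudget, adds a label, and re-applies the result.
func reapplyPDB(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string) error {
	live, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := policyv1apply.ExtractPodDisruptionBudget(live, fieldManager)
	if err != nil {
		return err
	}
	cfg.WithLabels(map[string]string{"team": "node"})
	_, err = cs.PolicyV1().PodDisruptionBudgets(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
```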
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
index 291714545..3c66739bd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
@@ -21,14 +21,14 @@ package v1
import (
policyv1 "k8s.io/api/policy/v1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
// with apply.
type PodDisruptionBudgetSpecApplyConfiguration struct {
MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
}
@@ -50,7 +50,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMinAvailable(value intst
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
-func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration {
+func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration {
b.Selector = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
index d0f9baf41..d3c44d90a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use
// with apply.
type PodDisruptionBudgetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- DisruptedPods map[string]v1.Time `json:"disruptedPods,omitempty"`
- DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
- CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
- DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
- ExpectedPods *int32 `json:"expectedPods,omitempty"`
- Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty"`
+ DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
+ CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
+ DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
+ ExpectedPods *int32 `json:"expectedPods,omitempty"`
+ Conditions []applyconfigurationsmetav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with
@@ -53,9 +53,9 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithObservedGeneration(val
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the DisruptedPods field,
// overwriting an existing map entries in DisruptedPods field with the same key.
-func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]v1.Time) *PodDisruptionBudgetStatusApplyConfiguration {
+func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]metav1.Time) *PodDisruptionBudgetStatusApplyConfiguration {
if b.DisruptedPods == nil && len(entries) > 0 {
- b.DisruptedPods = make(map[string]v1.Time, len(entries))
+ b.DisruptedPods = make(map[string]metav1.Time, len(entries))
}
for k, v := range entries {
b.DisruptedPods[k] = v
@@ -98,7 +98,7 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithExpectedPods(value int
// WithConditions adds the given value to the Conditions field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration {
+func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*applyconfigurationsmetav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithConditions")
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
index d4121af20..0b5945935 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/policy/v1beta1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -57,18 +57,18 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractEviction(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+func ExtractEviction(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
return extractEviction(eviction, fieldManager, "")
}
// ExtractEvictionStatus is the same as ExtractEviction except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractEvictionStatus(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+func ExtractEvictionStatus(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
return extractEviction(eviction, fieldManager, "status")
}
-func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
+func extractEviction(eviction *policyv1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
b := &EvictionApplyConfiguration{}
err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresourc
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply
// If called multiple times, the Name field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC
// If called multiple times, the UID field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction
// If called multiple times, the Generation field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyC
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *E
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting existing map entries in Labels field with the same key.
func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic
// overwriting existing map entries in Annotations field with the same key.
func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe
func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -251,5 +251,5 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *EvictionApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
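
Aside: the change repeated throughout these regenerated builders swaps promoted-field access (b.Kind, b.Name, b.Labels, ...) for explicit qualification through the embedded TypeMetaApplyConfiguration and ObjectMetaApplyConfiguration structs. Both spellings refer to the same storage, so the rewrite is behavior-preserving. A minimal standalone sketch of that equivalence, using illustrative Meta and Builder types rather than the real client-go ones:

package main

import "fmt"

// Meta stands in for an embedded metadata struct such as ObjectMetaApplyConfiguration.
type Meta struct {
	Name *string
}

// Builder stands in for an apply-configuration builder that embeds Meta by pointer.
type Builder struct {
	*Meta
}

func main() {
	b := &Builder{Meta: &Meta{}}
	v := "demo"

	// Promoted access (the old generated form) and explicit qualification
	// (the new generated form) write and read the same field.
	b.Name = &v
	fmt.Println(*b.Meta.Name == *b.Name) // prints: true
}
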
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
index 813b57bae..7743da76a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
@@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruption
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po
// If called multiple times, the Name field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod
// If called multiple times, the UID field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *Pod
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value meta
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v
// overwriting existing map entries in Labels field with the same key.
func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st
// overwriting existing map entries in Annotations field with the same key.
func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v
func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
index 405f1148b..d8fecf7a3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/policy/v1beta1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
intstr "k8s.io/apimachinery/pkg/util/intstr"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
@@ -27,10 +27,10 @@ import (
// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
// with apply.
type PodDisruptionBudgetSpecApplyConfiguration struct {
- MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
+ MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+ UnhealthyPodEvictionPolicy *policyv1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
}
// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with
@@ -66,7 +66,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int
// WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call.
-func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value v1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration {
+func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value policyv1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration {
b.UnhealthyPodEvictionPolicy = &value
return b
}
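
For orientation, a hedged usage sketch of the policy/v1beta1 builders touched above. The import aliases, resource names, and values are illustrative assumptions; the chained With* calls mirror the generated methods shown in this diff, and the whole expression only builds an in-memory apply configuration.

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
	policyv1beta1ac "k8s.io/client-go/applyconfigurations/policy/v1beta1"
)

func main() {
	// Hypothetical PodDisruptionBudget: keep at least half the pods available
	// and allow eviction of unhealthy pods regardless of the budget.
	minAvailable := intstr.FromString("50%")
	pdb := policyv1beta1ac.PodDisruptionBudget("web-pdb", "default").
		WithLabels(map[string]string{"app": "web"}).
		WithSpec(policyv1beta1ac.PodDisruptionBudgetSpec().
			WithMinAvailable(minAvailable).
			WithUnhealthyPodEvictionPolicy(policyv1beta1.AlwaysAllow))

	fmt.Println(*pdb.GetName()) // prints: web-pdb
}
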
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
index 5ae4dc37f..b7049a8ef 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
// with apply.
type AggregationRuleApplyConfiguration struct {
- ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
+ ClusterRoleSelectors []metav1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
}
// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with
@@ -37,7 +37,7 @@ func AggregationRule() *AggregationRuleApplyConfiguration {
// WithClusterRoleSelectors adds the given value to the ClusterRoleSelectors field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the ClusterRoleSelectors field.
-func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*v1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration {
+func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*metav1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithClusterRoleSelectors")
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
index c5b0075ec..9b46fdbe9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apirbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
// with apply.
type ClusterRoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
- AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
}
// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
@@ -57,18 +57,18 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+func ExtractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
return extractClusterRole(clusterRole, fieldManager, "")
}
// ExtractClusterRoleStatus is the same as ExtractClusterRole except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractClusterRoleStatus(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+func ExtractClusterRoleStatus(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
return extractClusterRole(clusterRole, fieldManager, "status")
}
-func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
+func extractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
+func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
+func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting existing map entries in Labels field with the same key.
func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C
// overwriting existing map entries in Annotations field with the same key.
func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration {
+func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
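
A sketch of how a ClusterRole apply configuration built with these methods is typically handed to server-side apply. The clientset wiring and the "demo-controller" field manager are assumptions for illustration, not part of this change.

package demo

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// applyViewerRole declares a read-only ClusterRole and applies it server-side.
func applyViewerRole(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// Built with the generated With* chain shown in the diff above.
	cr := rbacv1ac.ClusterRole("demo-viewer").
		WithLabels(map[string]string{"team": "platform"}).
		WithRules(rbacv1ac.PolicyRule().
			WithAPIGroups("").
			WithResources("pods").
			WithVerbs("get", "list", "watch"))

	applied, err := cs.RbacV1().ClusterRoles().Apply(ctx, cr, metav1.ApplyOptions{FieldManager: "demo-controller"})
	if err != nil {
		return err
	}
	fmt.Println("applied ClusterRole", applied.Name)
	return nil
}
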
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
index 91a9d5df3..7775bff0f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apirbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
// with apply.
type ClusterRoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
@@ -57,18 +57,18 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
}
// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractClusterRoleBindingStatus(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
}
-func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
+func extractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
+func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
+func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting existing map entries in Labels field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str
// overwriting existing map entries in Annotations field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration {
+func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
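
The ClusterRoleBinding builder composes the same way with the Subject and RoleRef apply configurations from this package; a small in-memory sketch with hypothetical names:

package main

import (
	"fmt"

	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
)

func main() {
	// Bind the built-in "view" ClusterRole to a service account (names are examples).
	crb := rbacv1ac.ClusterRoleBinding("demo-view-binding").
		WithSubjects(rbacv1ac.Subject().
			WithKind("ServiceAccount").
			WithName("demo-sa").
			WithNamespace("default")).
		WithRoleRef(rbacv1ac.RoleRef().
			WithAPIGroup("rbac.authorization.k8s.io").
			WithKind("ClusterRole").
			WithName("view"))

	fmt.Println(*crb.GetName(), len(crb.Subjects)) // prints: demo-view-binding 1
}
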
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
index b51f90426..b592753f6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apirbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// RoleApplyConfiguration represents a declarative configuration of the Role type for use
// with apply.
type RoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// Role constructs a declarative configuration of the Role type for use with
@@ -57,18 +57,18 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractRole(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+func ExtractRole(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
return extractRole(role, fieldManager, "")
}
// ExtractRoleStatus is the same as ExtractRole except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractRoleStatus(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+func ExtractRoleStatus(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
return extractRole(role, fieldManager, "status")
}
-func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
+func extractRole(role *rbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource)
if err != nil {
@@ -86,7 +86,7 @@ func extractRole(role *apirbacv1.Role, fieldManager string, subresource string)
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,25 +148,25 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration {
+func (b *RoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration {
+func (b *RoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro
// overwriting existing map entries in Labels field with the same key.
func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl
// overwriting existing map entries in Annotations field with the same key.
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -212,13 +212,13 @@ func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *Rol
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration {
+func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,14 +229,14 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
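
The Extract* helpers shown throughout these files build an apply configuration containing only the fields a given field manager owns, which is the natural input for a follow-up server-side apply. A short hedged sketch around ExtractRole (the manager name is an arbitrary example):

package demo

import (
	rbacv1 "k8s.io/api/rbac/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
)

// ownedRoleConfig returns the subset of an existing Role owned by manager.
// ExtractRole is marked Experimental! in the generated code above.
func ownedRoleConfig(role *rbacv1.Role, manager string) (*rbacv1ac.RoleApplyConfiguration, error) {
	return rbacv1ac.ExtractRole(role, manager)
}
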
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
index e59c8e6d3..32f12e87c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apirbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
// with apply.
type RoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
@@ -58,18 +58,18 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+func ExtractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
return extractRoleBinding(roleBinding, fieldManager, "")
}
// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractRoleBindingStatus(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+func ExtractRoleBindingStatus(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
return extractRoleBinding(roleBinding, fieldManager, "status")
}
-func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
+func extractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,25 +149,25 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
+func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
+func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R
// overwriting an existing map entries in Annotations field with the same key.
func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration {
+func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,14 +230,14 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
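
The `ExtractRoleBinding` helpers in this file support an extract/modify/re-apply round trip: read the live object, pull out only the fields owned by a given field manager, mutate them, and apply again. A minimal sketch under the same assumptions (hypothetical clientset, names, and field manager):

```go
package rbacapply

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelRoleBinding extracts the fields owned by our field manager from the
// live RoleBinding, updates a label, and re-applies; fields owned by other
// managers are left untouched.
func relabelRoleBinding(ctx context.Context, clientset kubernetes.Interface) error {
	const fieldManager = "npd-example"

	// Fetch the current object from the API server.
	live, err := clientset.RbacV1().RoleBindings("default").
		Get(ctx, "pod-reader-binding", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract only the fields this field manager previously applied.
	cfg, err := applyrbacv1.ExtractRoleBinding(live, fieldManager)
	if err != nil {
		return err
	}

	// Mutate the owned fields and re-apply.
	cfg.WithLabels(map[string]string{"team": "node"})
	_, err = clientset.RbacV1().RoleBindings("default").Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
```
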
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
index dc0e34e53..ecc75d340 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
@@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
index d3c12ec50..3b8c43a39 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
@@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBindi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
index db0a4f716..3fbd98543 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
@@ -86,7 +86,7 @@ func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro
// overwriting an existing map entries in Labels field with the same key.
func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl
// overwriting an existing map entries in Annotations field with the same key.
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
index 8efcddd69..37c0d37cf 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
@@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R
// overwriting an existing map entries in Annotations field with the same key.
func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
index 5e9c23854..124e47ef7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
@@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
index 2f088b93e..140e7e176 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
@@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBindin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu
// If called multiple times, the Name field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus
// If called multiple times, the UID field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -263,5 +263,5 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ClusterRoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
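
Editor's note: the mechanical change running through all of these generated builders is that the embedded TypeMetaApplyConfiguration / ObjectMetaApplyConfiguration fields are now qualified explicitly instead of being reached through Go's field promotion. A minimal standalone sketch (hypothetical types, not the client-go ones) showing that the two spellings touch the same field:

package main

import "fmt"

// Meta mimics an embedded apply-configuration struct.
type Meta struct {
	Name *string
}

// Builder embeds Meta, so Meta's fields are promoted onto Builder.
type Builder struct {
	Meta
}

func main() {
	value := "example"
	b := &Builder{}

	// Promoted access (the old generated form).
	b.Name = &value

	// Explicitly qualified access (the new generated form); same underlying field.
	b.Meta.Name = &value

	fmt.Println(*b.Name == *b.Meta.Name) // true
}
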
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
index 4b1b6112b..82240514f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
@@ -86,7 +86,7 @@ func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro
// overwriting an existing map entries in Labels field with the same key.
func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl
// overwriting an existing map entries in Annotations field with the same key.
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc
func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -256,5 +256,5 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
index 246928553..1c66b976e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
@@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager strin
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin
// If called multiple times, the Name field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding
// If called multiple times, the UID field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB
// If called multiple times, the Generation field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R
// overwriting an existing map entries in Annotations field with the same key.
func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -265,5 +265,5 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *RoleBindingApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go
new file mode 100644
index 000000000..da58d4348
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use
+// with apply.
+type AllocatedDeviceStatusApplyConfiguration struct {
+ Driver *string `json:"driver,omitempty"`
+ Pool *string `json:"pool,omitempty"`
+ Device *string `json:"device,omitempty"`
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"`
+}
+
+// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with
+// apply.
+func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration {
+ return &AllocatedDeviceStatusApplyConfiguration{}
+}
+
+// WithDriver sets the Driver field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Driver = &value
+ return b
+}
+
+// WithPool sets the Pool field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Pool field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Pool = &value
+ return b
+}
+
+// WithDevice sets the Device field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Device field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Device = &value
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithData sets the Data field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Data field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration {
+ b.Data = &value
+ return b
+}
+
+// WithNetworkData sets the NetworkData field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkData field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration {
+ b.NetworkData = value
+ return b
+}
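
Editor's note: a usage sketch of the newly vendored AllocatedDeviceStatus builder, chaining the "With" setters shown above; the driver, pool, and device strings are illustrative values, not anything defined by client-go:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Each "With" setter returns the receiver, so the status can be built in one chain.
	status := resourcev1alpha3.AllocatedDeviceStatus().
		WithDriver("example.com/driver").
		WithPool("pool-a").
		WithDevice("gpu-0")

	fmt.Println(*status.Driver, *status.Pool, *status.Device)
}
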
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go
index 3090b2f9d..7c7427ee9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go
@@ -27,7 +27,6 @@ import (
type AllocationResultApplyConfiguration struct {
Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- Controller *string `json:"controller,omitempty"`
}
// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
@@ -51,11 +50,3 @@ func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSele
b.NodeSelector = value
return b
}
-
-// WithController sets the Controller field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Controller field is set to the value of the last call.
-func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration {
- b.Controller = &value
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go
index e6b774508..b58e43294 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go
@@ -19,15 +19,15 @@ limitations under the License.
package v1alpha3
import (
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
resource "k8s.io/apimachinery/pkg/api/resource"
)
// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use
// with apply.
type BasicDeviceApplyConfiguration struct {
- Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
- Capacity map[v1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"`
+ Attributes map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
+ Capacity map[resourcev1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"`
}
// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with
@@ -40,9 +40,9 @@ func BasicDevice() *BasicDeviceApplyConfiguration {
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Attributes field,
// overwriting an existing map entries in Attributes field with the same key.
-func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration {
+func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration {
if b.Attributes == nil && len(entries) > 0 {
- b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries))
+ b.Attributes = make(map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries))
}
for k, v := range entries {
b.Attributes[k] = v
@@ -54,9 +54,9 @@ func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.Qual
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Capacity field,
// overwriting an existing map entries in Capacity field with the same key.
-func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration {
+func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration {
if b.Capacity == nil && len(entries) > 0 {
- b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries))
+ b.Capacity = make(map[resourcev1alpha3.QualifiedName]resource.Quantity, len(entries))
}
for k, v := range entries {
b.Capacity[k] = v
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go
index 342e724ef..25907e40d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1alpha3
import (
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)
// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
// with apply.
type DeviceAllocationConfigurationApplyConfiguration struct {
- Source *v1alpha3.AllocationConfigSource `json:"source,omitempty"`
- Requests []string `json:"requests,omitempty"`
+ Source *resourcev1alpha3.AllocationConfigSource `json:"source,omitempty"`
+ Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
@@ -39,7 +39,7 @@ func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfigur
// WithSource sets the Source field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Source field is set to the value of the last call.
-func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration {
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration {
b.Source = &value
return b
}
@@ -58,6 +58,6 @@ func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ..
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Opaque field is set to the value of the last call.
func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration {
- b.Opaque = value
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go
index 4cabe9859..045798856 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go
@@ -45,6 +45,6 @@ func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Opaque field is set to the value of the last call.
func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration {
- b.Opaque = value
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go
index abaadbb36..ae3e396e0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go
@@ -84,7 +84,7 @@ func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApply
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClas
// If called multiple times, the Name field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApply
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceCl
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClass
// If called multiple times, the UID field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApp
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *Devic
// If called multiple times, the Generation field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClass
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time)
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time)
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int
// overwriting an existing map entries in Labels field with the same key.
func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *D
// overwriting an existing map entries in Annotations field with the same key.
func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR
func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConf
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *DeviceClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
index cb3758a3e..6daa4a97e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go
@@ -34,6 +34,6 @@ func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration {
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Opaque field is set to the value of the last call.
func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration {
- b.Opaque = value
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go
index d40a43de6..37db6a1cc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go
@@ -18,16 +18,11 @@ limitations under the License.
package v1alpha3
-import (
- v1 "k8s.io/client-go/applyconfigurations/core/v1"
-)
-
// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
// with apply.
type DeviceClassSpecApplyConfiguration struct {
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
- SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"`
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
@@ -61,11 +56,3 @@ func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassCon
}
return b
}
-
-// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the SuitableNodes field is set to the value of the last call.
-func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
- b.SuitableNodes = value
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
index 479acd57c..712f431f4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1alpha3
import (
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)
// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
// with apply.
type DeviceConstraintApplyConfiguration struct {
- Requests []string `json:"requests,omitempty"`
- MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ Requests []string `json:"requests,omitempty"`
+ MatchAttribute *resourcev1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"`
}
// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with
@@ -48,7 +48,7 @@ func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *Dev
// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the MatchAttribute field is set to the value of the last call.
-func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration {
+func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration {
b.MatchAttribute = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go
index 712b9bf9b..4c3cffcf4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go
@@ -21,10 +21,11 @@ package v1alpha3
// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
// with apply.
type DeviceRequestAllocationResultApplyConfiguration struct {
- Request *string `json:"request,omitempty"`
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
+ Request *string `json:"request,omitempty"`
+ Driver *string `json:"driver,omitempty"`
+ Pool *string `json:"pool,omitempty"`
+ Device *string `json:"device,omitempty"`
+ AdminAccess *bool `json:"adminAccess,omitempty"`
}
// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
@@ -64,3 +65,11 @@ func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value strin
b.Device = &value
return b
}
+
+// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdminAccess field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration {
+ b.AdminAccess = &value
+ return b
+}
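
Editor's note: a sketch of the new WithAdminAccess setter in a builder chain. The DeviceRequestAllocationResult() constructor is assumed from the generator's usual convention (it is not visible in this hunk), and the string values are illustrative:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Build a result that opts in to the new adminAccess field.
	result := resourcev1alpha3.DeviceRequestAllocationResult().
		WithRequest("req-0").
		WithDriver("example.com/driver").
		WithPool("pool-a").
		WithDevice("gpu-0").
		WithAdminAccess(true)

	fmt.Println(*result.AdminAccess) // true
}
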
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go
new file mode 100644
index 000000000..9ea773ed4
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go
@@ -0,0 +1,59 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use
+// with apply.
+type NetworkDeviceDataApplyConfiguration struct {
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ IPs []string `json:"ips,omitempty"`
+ HardwareAddress *string `json:"hardwareAddress,omitempty"`
+}
+
+// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with
+// apply.
+func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration {
+ return &NetworkDeviceDataApplyConfiguration{}
+}
+
+// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InterfaceName field is set to the value of the last call.
+func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration {
+ b.InterfaceName = &value
+ return b
+}
+
+// WithIPs adds the given value to the IPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IPs field.
+func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration {
+ for i := range values {
+ b.IPs = append(b.IPs, values[i])
+ }
+ return b
+}
+
+// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HardwareAddress field is set to the value of the last call.
+func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration {
+ b.HardwareAddress = &value
+ return b
+}
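
Editor's note: a sketch combining the two new builders in this diff, attaching a NetworkDeviceData configuration to an AllocatedDeviceStatus via WithNetworkData; the interface name, IP, and hardware address are placeholder values:

package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func main() {
	// Describe the network-facing properties of an allocated device...
	netData := resourcev1alpha3.NetworkDeviceData().
		WithInterfaceName("eth0").
		WithIPs("10.0.0.2/24").
		WithHardwareAddress("aa:bb:cc:dd:ee:ff")

	// ...and attach it to a device status through the new builder.
	status := resourcev1alpha3.AllocatedDeviceStatus().
		WithDevice("nic-0").
		WithNetworkData(netData)

	fmt.Println(*status.NetworkData.InterfaceName)
}
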
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go
deleted file mode 100644
index fd25df7a5..000000000
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use
-// with apply.
-type PodSchedulingContextSpecApplyConfiguration struct {
- SelectedNode *string `json:"selectedNode,omitempty"`
- PotentialNodes []string `json:"potentialNodes,omitempty"`
-}
-
-// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with
-// apply.
-func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration {
- return &PodSchedulingContextSpecApplyConfiguration{}
-}
-
-// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the SelectedNode field is set to the value of the last call.
-func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration {
- b.SelectedNode = &value
- return b
-}
-
-// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the PotentialNodes field.
-func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration {
- for i := range values {
- b.PotentialNodes = append(b.PotentialNodes, values[i])
- }
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go
deleted file mode 100644
index a06e370cc..000000000
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use
-// with apply.
-type PodSchedulingContextStatusApplyConfiguration struct {
- ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"`
-}
-
-// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with
-// apply.
-func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration {
- return &PodSchedulingContextStatusApplyConfiguration{}
-}
-
-// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the ResourceClaims field.
-func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration {
- for i := range values {
- if values[i] == nil {
- panic("nil value passed to WithResourceClaims")
- }
- b.ResourceClaims = append(b.ResourceClaims, *values[i])
- }
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
index 616159558..96cf63f1f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
@@ -87,7 +87,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldMa
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -95,7 +95,7 @@ func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -104,7 +104,7 @@ func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *Resource
// If called multiple times, the Name field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -113,7 +113,7 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -122,7 +122,7 @@ func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *Resour
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -131,7 +131,7 @@ func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceC
// If called multiple times, the UID field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -140,7 +140,7 @@ func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClai
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -149,7 +149,7 @@ func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *Res
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -158,7 +158,7 @@ func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceC
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -167,7 +167,7 @@ func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -176,7 +176,7 @@ func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -186,11 +186,11 @@ func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -201,11 +201,11 @@ func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -219,7 +219,7 @@ func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -230,7 +230,7 @@ func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -260,5 +260,5 @@ func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusA
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ResourceClaimApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go
deleted file mode 100644
index caab89acd..000000000
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use
-// with apply.
-type ResourceClaimSchedulingStatusApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- UnsuitableNodes []string `json:"unsuitableNodes,omitempty"`
-}
-
-// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with
-// apply.
-func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration {
- return &ResourceClaimSchedulingStatusApplyConfiguration{}
-}
-
-// WithName sets the Name field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Name field is set to the value of the last call.
-func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration {
- b.Name = &value
- return b
-}
-
-// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field.
-func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration {
- for i := range values {
- b.UnsuitableNodes = append(b.UnsuitableNodes, values[i])
- }
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go
index 7c5b65681..dfe8bdb14 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go
@@ -21,8 +21,7 @@ package v1alpha3
// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
// with apply.
type ResourceClaimSpecApplyConfiguration struct {
- Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
- Controller *string `json:"controller,omitempty"`
+ Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
}
// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with
@@ -38,11 +37,3 @@ func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimAppl
b.Devices = value
return b
}
-
-// WithController sets the Controller field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Controller field is set to the value of the last call.
-func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration {
- b.Controller = &value
- return b
-}
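
With the Controller field and its setter removed from ResourceClaimSpec, only the Devices side of the spec is populated through the builder. A minimal usage sketch under the assumption that the generated v1alpha3 ResourceClaimSpec() and DeviceClaim() constructors exist alongside the code above (the import alias is illustrative):

package sketch

import (
	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func exampleClaimSpec() *resourcev1alpha3ac.ResourceClaimSpecApplyConfiguration {
	// Only the devices request remains on the spec; there is no
	// WithController setter after this change.
	return resourcev1alpha3ac.ResourceClaimSpec().
		WithDevices(resourcev1alpha3ac.DeviceClaim())
}
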
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
index a52af3ec3..f0c32133a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go
@@ -21,9 +21,9 @@ package v1alpha3
// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
// with apply.
type ResourceClaimStatusApplyConfiguration struct {
- Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
- ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
- DeallocationRequested *bool `json:"deallocationRequested,omitempty"`
+ Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
+ Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
}
// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
@@ -53,10 +53,15 @@ func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*Resou
return b
}
-// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the DeallocationRequested field is set to the value of the last call.
-func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration {
- b.DeallocationRequested = &value
+// WithDevices adds the given value to the Devices field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Devices field.
+func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithDevices")
+ }
+ b.Devices = append(b.Devices, *values[i])
+ }
return b
}
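
The status builder now appends per-device statuses instead of toggling DeallocationRequested. A hedged sketch of the new call shape, assuming a v1alpha3 AllocatedDeviceStatus() constructor analogous to the v1beta1 one added later in this diff (driver and device names are made up):

package sketch

import (
	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

func exampleClaimStatus() *resourcev1alpha3ac.ResourceClaimStatusApplyConfiguration {
	// WithDevices panics on nil entries and appends across calls,
	// mirroring the generated implementation above.
	return resourcev1alpha3ac.ResourceClaimStatus().
		WithDevices(resourcev1alpha3ac.AllocatedDeviceStatus().
			WithDriver("dra.example.com").
			WithDevice("gpu-0"))
}
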
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
index 6f371d0c0..1eb55eee4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
@@ -86,7 +86,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.Resour
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -94,7 +94,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *Resour
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -103,7 +103,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *
// If called multiple times, the Name field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -112,7 +112,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *Resour
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -121,7 +121,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string)
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -130,7 +130,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *R
// If called multiple times, the UID field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -139,7 +139,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *Reso
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -148,7 +148,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value stri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -157,7 +157,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *R
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -166,7 +166,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value me
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -175,7 +175,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value me
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -185,11 +185,11 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds
// overwriting an existing map entries in Labels field with the same key.
func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -200,11 +200,11 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]
// overwriting an existing map entries in Annotations field with the same key.
func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,7 +218,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -229,7 +229,7 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...
func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -251,5 +251,5 @@ func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimT
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
index 5b03ab755..578f6bce1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go
@@ -42,7 +42,7 @@ func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration {
// If called multiple times, the Name field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -51,7 +51,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *Re
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -60,7 +60,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value str
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -69,7 +69,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string
// If called multiple times, the UID field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -78,7 +78,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -87,7 +87,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -96,7 +96,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -105,7 +105,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(valu
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -114,7 +114,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(valu
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -124,11 +124,11 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSec
// overwriting an existing map entries in Labels field with the same key.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -139,11 +139,11 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[str
// overwriting an existing map entries in Annotations field with the same key.
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -157,7 +157,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -168,7 +168,7 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values
func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -190,5 +190,5 @@ func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceCl
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go
index aaad68612..615cf3e06 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go
@@ -84,7 +84,7 @@ func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldMa
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *Resource
// If called multiple times, the Name field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *Resour
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceS
// If called multiple times, the UID field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSlic
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *Res
// If called multiple times, the Generation field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceS
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApply
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *ResourceSliceApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go
new file mode 100644
index 000000000..cd5189771
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use
+// with apply.
+type AllocatedDeviceStatusApplyConfiguration struct {
+ Driver *string `json:"driver,omitempty"`
+ Pool *string `json:"pool,omitempty"`
+ Device *string `json:"device,omitempty"`
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"`
+}
+
+// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with
+// apply.
+func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration {
+ return &AllocatedDeviceStatusApplyConfiguration{}
+}
+
+// WithDriver sets the Driver field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Driver = &value
+ return b
+}
+
+// WithPool sets the Pool field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Pool field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Pool = &value
+ return b
+}
+
+// WithDevice sets the Device field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Device field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration {
+ b.Device = &value
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithData sets the Data field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Data field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration {
+ b.Data = &value
+ return b
+}
+
+// WithNetworkData sets the NetworkData field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkData field is set to the value of the last call.
+func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration {
+ b.NetworkData = value
+ return b
+}
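
The setters above are plain last-write-wins builders. A short usage sketch of the new v1beta1 type; the driver, pool, and device names are purely illustrative:

package sketch

import (
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleAllocatedDeviceStatus() *resourcev1beta1ac.AllocatedDeviceStatusApplyConfiguration {
	// Each With* call overwrites the previous value for its field.
	return resourcev1beta1ac.AllocatedDeviceStatus().
		WithDriver("dra.example.com").
		WithPool("pool-a").
		WithDevice("gpu-0")
}
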
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go
new file mode 100644
index 000000000..549ef71af
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/core/v1"
+)
+
+// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use
+// with apply.
+type AllocationResultApplyConfiguration struct {
+ Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+}
+
+// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
+// apply.
+func AllocationResult() *AllocationResultApplyConfiguration {
+ return &AllocationResultApplyConfiguration{}
+}
+
+// WithDevices sets the Devices field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Devices field is set to the value of the last call.
+func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration {
+ b.Devices = value
+ return b
+}
+
+// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeSelector field is set to the value of the last call.
+func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration {
+ b.NodeSelector = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go
new file mode 100644
index 000000000..691a8f15a
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go
@@ -0,0 +1,64 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+)
+
+// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use
+// with apply.
+type BasicDeviceApplyConfiguration struct {
+ Attributes map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
+ Capacity map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
+}
+
+// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with
+// apply.
+func BasicDevice() *BasicDeviceApplyConfiguration {
+ return &BasicDeviceApplyConfiguration{}
+}
+
+// WithAttributes puts the entries into the Attributes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Attributes field,
+// overwriting an existing map entries in Attributes field with the same key.
+func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration {
+ if b.Attributes == nil && len(entries) > 0 {
+ b.Attributes = make(map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration, len(entries))
+ }
+ for k, v := range entries {
+ b.Attributes[k] = v
+ }
+ return b
+}
+
+// WithCapacity puts the entries into the Capacity field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Capacity field,
+// overwriting an existing map entries in Capacity field with the same key.
+func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration) *BasicDeviceApplyConfiguration {
+ if b.Capacity == nil && len(entries) > 0 {
+ b.Capacity = make(map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration, len(entries))
+ }
+ for k, v := range entries {
+ b.Capacity[k] = v
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go
new file mode 100644
index 000000000..c4a28bbf8
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
+// with apply.
+type CELDeviceSelectorApplyConfiguration struct {
+ Expression *string `json:"expression,omitempty"`
+}
+
+// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with
+// apply.
+func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration {
+ return &CELDeviceSelectorApplyConfiguration{}
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration {
+ b.Expression = &value
+ return b
+}
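
CELDeviceSelector wraps a single CEL expression string. A minimal sketch of building one; the expression is only an illustrative guess at typical DRA selector syntax, not taken from this diff:

package sketch

import (
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleSelector() *resourcev1beta1ac.CELDeviceSelectorApplyConfiguration {
	// Repeated WithExpression calls keep only the last expression.
	return resourcev1beta1ac.CELDeviceSelector().
		WithExpression(`device.driver == "dra.example.com"`)
}
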
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go
new file mode 100644
index 000000000..f635267e2
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceApplyConfiguration represents a declarative configuration of the Device type for use
+// with apply.
+type DeviceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"`
+}
+
+// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with
+// apply.
+func Device() *DeviceApplyConfiguration {
+ return &DeviceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithBasic sets the Basic field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Basic field is set to the value of the last call.
+func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration {
+ b.Basic = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go
new file mode 100644
index 000000000..b5218ba4a
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go
@@ -0,0 +1,63 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+)
+
+// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
+// with apply.
+type DeviceAllocationConfigurationApplyConfiguration struct {
+ Source *resourcev1beta1.AllocationConfigSource `json:"source,omitempty"`
+ Requests []string `json:"requests,omitempty"`
+ DeviceConfigurationApplyConfiguration `json:",inline"`
+}
+
+// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with
+// apply.
+func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration {
+ return &DeviceAllocationConfigurationApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1beta1.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration {
+ b.Source = &value
+ return b
+}
+
+// WithRequests adds the given value to the Requests field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Requests field.
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration {
+ for i := range values {
+ b.Requests = append(b.Requests, values[i])
+ }
+ return b
+}
+
+// WithOpaque sets the Opaque field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Opaque field is set to the value of the last call.
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration {
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go
new file mode 100644
index 000000000..bf309cf23
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use
+// with apply.
+type DeviceAllocationResultApplyConfiguration struct {
+ Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"`
+ Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
+}
+
+// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with
+// apply.
+func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration {
+ return &DeviceAllocationResultApplyConfiguration{}
+}
+
+// WithResults adds the given value to the Results field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Results field.
+func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResults")
+ }
+ b.Results = append(b.Results, *values[i])
+ }
+ return b
+}
+
+// WithConfig adds the given value to the Config field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Config field.
+func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConfig")
+ }
+ b.Config = append(b.Config, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go
new file mode 100644
index 000000000..6e88ae38a
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use
+// with apply.
+type DeviceAttributeApplyConfiguration struct {
+ IntValue *int64 `json:"int,omitempty"`
+ BoolValue *bool `json:"bool,omitempty"`
+ StringValue *string `json:"string,omitempty"`
+ VersionValue *string `json:"version,omitempty"`
+}
+
+// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with
+// apply.
+func DeviceAttribute() *DeviceAttributeApplyConfiguration {
+ return &DeviceAttributeApplyConfiguration{}
+}
+
+// WithIntValue sets the IntValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IntValue field is set to the value of the last call.
+func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration {
+ b.IntValue = &value
+ return b
+}
+
+// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BoolValue field is set to the value of the last call.
+func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration {
+ b.BoolValue = &value
+ return b
+}
+
+// WithStringValue sets the StringValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the StringValue field is set to the value of the last call.
+func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration {
+ b.StringValue = &value
+ return b
+}
+
+// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VersionValue field is set to the value of the last call.
+func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration {
+ b.VersionValue = &value
+ return b
+}
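
DeviceAttribute exposes four typed setters; in the API an attribute is effectively a one-of, so a caller would normally set exactly one of them per attribute. A brief sketch with made-up values:

package sketch

import (
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleAttributes() []*resourcev1beta1ac.DeviceAttributeApplyConfiguration {
	// One value per attribute; setting several on the same attribute
	// would produce an object the API server rejects.
	return []*resourcev1beta1ac.DeviceAttributeApplyConfiguration{
		resourcev1beta1ac.DeviceAttribute().WithStringValue("a100"),
		resourcev1beta1ac.DeviceAttribute().WithBoolValue(true),
		resourcev1beta1ac.DeviceAttribute().WithIntValue(8),
	}
}
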
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go
new file mode 100644
index 000000000..dcb3504b8
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resource "k8s.io/apimachinery/pkg/api/resource"
+)
+
+// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use
+// with apply.
+type DeviceCapacityApplyConfiguration struct {
+ Value *resource.Quantity `json:"value,omitempty"`
+}
+
+// DeviceCapacityApplyConfiguration constructs a declarative configuration of the DeviceCapacity type for use with
+// apply.
+func DeviceCapacity() *DeviceCapacityApplyConfiguration {
+ return &DeviceCapacityApplyConfiguration{}
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *DeviceCapacityApplyConfiguration) WithValue(value resource.Quantity) *DeviceCapacityApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go
new file mode 100644
index 000000000..95c1c2e6e
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use
+// with apply.
+type DeviceClaimApplyConfiguration struct {
+ Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
+ Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
+ Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
+}
+
+// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with
+// apply.
+func DeviceClaim() *DeviceClaimApplyConfiguration {
+ return &DeviceClaimApplyConfiguration{}
+}
+
+// WithRequests adds the given value to the Requests field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Requests field.
+func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRequests")
+ }
+ b.Requests = append(b.Requests, *values[i])
+ }
+ return b
+}
+
+// WithConstraints adds the given value to the Constraints field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Constraints field.
+func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConstraints")
+ }
+ b.Constraints = append(b.Constraints, *values[i])
+ }
+ return b
+}
+
+// WithConfig adds the given value to the Config field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Config field.
+func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConfig")
+ }
+ b.Config = append(b.Config, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go
new file mode 100644
index 000000000..beac5e9d9
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go
@@ -0,0 +1,50 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use
+// with apply.
+type DeviceClaimConfigurationApplyConfiguration struct {
+ Requests []string `json:"requests,omitempty"`
+ DeviceConfigurationApplyConfiguration `json:",inline"`
+}
+
+// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with
+// apply.
+func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration {
+ return &DeviceClaimConfigurationApplyConfiguration{}
+}
+
+// WithRequests adds the given value to the Requests field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Requests field.
+func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration {
+ for i := range values {
+ b.Requests = append(b.Requests, values[i])
+ }
+ return b
+}
+
+// WithOpaque sets the Opaque field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Opaque field is set to the value of the last call.
+func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration {
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go
new file mode 100644
index 000000000..c71e22259
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go
@@ -0,0 +1,253 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use
+// with apply.
+type DeviceClassApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// DeviceClass constructs a declarative configuration of the DeviceClass type for use with
+// apply.
+func DeviceClass(name string) *DeviceClassApplyConfiguration {
+ b := &DeviceClassApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("DeviceClass")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b
+}
+
+// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
+// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
+// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// deviceClass must be an unmodified DeviceClass API object that was retrieved from the Kubernetes API.
+// ExtractDeviceClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
+ return extractDeviceClass(deviceClass, fieldManager, "")
+}
+
+// ExtractDeviceClassStatus is the same as ExtractDeviceClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeviceClassStatus(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
+ return extractDeviceClass(deviceClass, fieldManager, "status")
+}
+
+func extractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
+ b := &DeviceClassApplyConfiguration{}
+ err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1beta1.DeviceClass"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deviceClass.Name)
+
+ b.WithKind("DeviceClass")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *DeviceClassApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
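
As a usage sketch of how the builders above compose: the hypothetical example below (class name, labels, driver, and parameters are invented for illustration) builds a DeviceClass apply configuration and marshals it to JSON instead of calling the API:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func main() {
	// DeviceClass(name) pre-populates Kind, APIVersion, and metadata.name; the rest
	// of the object is filled in by chaining the generated With* setters.
	dc := resourceac.DeviceClass("example-gpu").
		WithLabels(map[string]string{"vendor": "example.com"}).
		WithSpec(resourceac.DeviceClassSpec().
			WithConfig(resourceac.DeviceClassConfiguration().
				WithOpaque(resourceac.OpaqueDeviceConfiguration().
					WithDriver("gpu.example.com").
					WithParameters(runtime.RawExtension{Raw: []byte(`{"sharing":"none"}`)}))))

	out, _ := json.MarshalIndent(dc, "", "  ")
	fmt.Println(string(out))
	// In real code the configuration would be passed to the generated clientset's
	// Apply method together with a field manager (ApplyOptions), rather than printed.
}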
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go
new file mode 100644
index 000000000..3ce90eab5
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use
+// with apply.
+type DeviceClassConfigurationApplyConfiguration struct {
+ DeviceConfigurationApplyConfiguration `json:",inline"`
+}
+
+// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with
+// apply.
+func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration {
+ return &DeviceClassConfigurationApplyConfiguration{}
+}
+
+// WithOpaque sets the Opaque field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Opaque field is set to the value of the last call.
+func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration {
+ b.DeviceConfigurationApplyConfiguration.Opaque = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go
new file mode 100644
index 000000000..901b0800e
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
+// with apply.
+type DeviceClassSpecApplyConfiguration struct {
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
+}
+
+// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
+// apply.
+func DeviceClassSpec() *DeviceClassSpecApplyConfiguration {
+ return &DeviceClassSpecApplyConfiguration{}
+}
+
+// WithSelectors adds the given value to the Selectors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Selectors field.
+func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSelectors")
+ }
+ b.Selectors = append(b.Selectors, *values[i])
+ }
+ return b
+}
+
+// WithConfig adds the given value to the Config field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Config field.
+func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConfig")
+ }
+ b.Config = append(b.Config, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go
new file mode 100644
index 000000000..b0f41f5a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
+// with apply.
+type DeviceConfigurationApplyConfiguration struct {
+ Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
+}
+
+// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with
+// apply.
+func DeviceConfiguration() *DeviceConfigurationApplyConfiguration {
+ return &DeviceConfigurationApplyConfiguration{}
+}
+
+// WithOpaque sets the Opaque field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Opaque field is set to the value of the last call.
+func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration {
+ b.Opaque = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go
new file mode 100644
index 000000000..0c5fc2525
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go
@@ -0,0 +1,54 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+)
+
+// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
+// with apply.
+type DeviceConstraintApplyConfiguration struct {
+ Requests []string `json:"requests,omitempty"`
+ MatchAttribute *resourcev1beta1.FullyQualifiedName `json:"matchAttribute,omitempty"`
+}
+
+// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with
+// apply.
+func DeviceConstraint() *DeviceConstraintApplyConfiguration {
+ return &DeviceConstraintApplyConfiguration{}
+}
+
+// WithRequests adds the given value to the Requests field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Requests field.
+func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration {
+ for i := range values {
+ b.Requests = append(b.Requests, values[i])
+ }
+ return b
+}
+
+// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchAttribute field is set to the value of the last call.
+func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1beta1.FullyQualifiedName) *DeviceConstraintApplyConfiguration {
+ b.MatchAttribute = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go
new file mode 100644
index 000000000..ea454a275
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+)
+
+// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
+// with apply.
+type DeviceRequestApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ Count *int64 `json:"count,omitempty"`
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+}
+
+// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
+// apply.
+func DeviceRequest() *DeviceRequestApplyConfiguration {
+ return &DeviceRequestApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeviceClassName field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration {
+ b.DeviceClassName = &value
+ return b
+}
+
+// WithSelectors adds the given value to the Selectors field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Selectors field.
+func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSelectors")
+ }
+ b.Selectors = append(b.Selectors, *values[i])
+ }
+ return b
+}
+
+// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllocationMode field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1beta1.DeviceAllocationMode) *DeviceRequestApplyConfiguration {
+ b.AllocationMode = &value
+ return b
+}
+
+// WithCount sets the Count field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Count field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration {
+ b.Count = &value
+ return b
+}
+
+// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdminAccess field is set to the value of the last call.
+func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration {
+ b.AdminAccess = &value
+ return b
+}
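
Putting the claim-side builders together, a hypothetical sketch (request name, device class, attribute name, and count are illustrative assumptions) of a DeviceClaim with one request and a matching constraint:

package main

import (
	"encoding/json"
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func main() {
	// One request for a single device from an assumed "example-gpu" class, plus a
	// constraint that all matched devices agree on an (illustrative) model attribute.
	claim := resourceac.DeviceClaim().
		WithRequests(resourceac.DeviceRequest().
			WithName("gpu").
			WithDeviceClassName("example-gpu").
			// "ExactCount" in string form; the v1beta1 API also defines a constant for it.
			WithAllocationMode(resourceapi.DeviceAllocationMode("ExactCount")).
			WithCount(1)).
		WithConstraints(resourceac.DeviceConstraint().
			WithRequests("gpu").
			WithMatchAttribute(resourceapi.FullyQualifiedName("gpu.example.com/model")))

	out, _ := json.Marshal(claim)
	fmt.Println(string(out))
}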
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go
new file mode 100644
index 000000000..c28eb26ab
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
+// with apply.
+type DeviceRequestAllocationResultApplyConfiguration struct {
+ Request *string `json:"request,omitempty"`
+ Driver *string `json:"driver,omitempty"`
+ Pool *string `json:"pool,omitempty"`
+ Device *string `json:"device,omitempty"`
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+}
+
+// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
+// apply.
+func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration {
+ return &DeviceRequestAllocationResultApplyConfiguration{}
+}
+
+// WithRequest sets the Request field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Request field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration {
+ b.Request = &value
+ return b
+}
+
+// WithDriver sets the Driver field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration {
+ b.Driver = &value
+ return b
+}
+
+// WithPool sets the Pool field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Pool field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration {
+ b.Pool = &value
+ return b
+}
+
+// WithDevice sets the Device field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Device field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration {
+ b.Device = &value
+ return b
+}
+
+// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdminAccess field is set to the value of the last call.
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration {
+ b.AdminAccess = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go
new file mode 100644
index 000000000..bf60bf434
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
+// with apply.
+type DeviceSelectorApplyConfiguration struct {
+ CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
+}
+
+// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with
+// apply.
+func DeviceSelector() *DeviceSelectorApplyConfiguration {
+ return &DeviceSelectorApplyConfiguration{}
+}
+
+// WithCEL sets the CEL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CEL field is set to the value of the last call.
+func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration {
+ b.CEL = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go
new file mode 100644
index 000000000..c9d488019
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go
@@ -0,0 +1,59 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use
+// with apply.
+type NetworkDeviceDataApplyConfiguration struct {
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ IPs []string `json:"ips,omitempty"`
+ HardwareAddress *string `json:"hardwareAddress,omitempty"`
+}
+
+// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with
+// apply.
+func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration {
+ return &NetworkDeviceDataApplyConfiguration{}
+}
+
+// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InterfaceName field is set to the value of the last call.
+func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration {
+ b.InterfaceName = &value
+ return b
+}
+
+// WithIPs adds the given value to the IPs field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IPs field.
+func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration {
+ for i := range values {
+ b.IPs = append(b.IPs, values[i])
+ }
+ return b
+}
+
+// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HardwareAddress field is set to the value of the last call.
+func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration {
+ b.HardwareAddress = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go
new file mode 100644
index 000000000..0b52fa93a
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use
+// with apply.
+type OpaqueDeviceConfigurationApplyConfiguration struct {
+ Driver *string `json:"driver,omitempty"`
+ Parameters *runtime.RawExtension `json:"parameters,omitempty"`
+}
+
+// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with
+// apply.
+func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration {
+ return &OpaqueDeviceConfigurationApplyConfiguration{}
+}
+
+// WithDriver sets the Driver field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration {
+ b.Driver = &value
+ return b
+}
+
+// WithParameters sets the Parameters field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Parameters field is set to the value of the last call.
+func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration {
+ b.Parameters = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
similarity index 59%
rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go
rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
index ee8e73ebe..ee16718fd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha3
+package v1beta1
import (
- resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -27,156 +27,156 @@ import (
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
-// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use
+// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
// with apply.
-type PodSchedulingContextApplyConfiguration struct {
+type ResourceClaimApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"`
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
}
-// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with
+// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
// apply.
-func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration {
- b := &PodSchedulingContextApplyConfiguration{}
+func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
+ b := &ResourceClaimApplyConfiguration{}
b.WithName(name)
b.WithNamespace(namespace)
- b.WithKind("PodSchedulingContext")
- b.WithAPIVersion("resource.k8s.io/v1alpha3")
+ b.WithKind("ResourceClaim")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
return b
}
-// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from
-// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a
-// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
+// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
+// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
-// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API.
-// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow.
+// resourceClaim must be an unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaim provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
- return extractPodSchedulingContext(podSchedulingContext, fieldManager, "")
+func ExtractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
+ return extractResourceClaim(resourceClaim, fieldManager, "")
}
-// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except
+// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) {
- return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status")
+func ExtractResourceClaimStatus(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
+ return extractResourceClaim(resourceClaim, fieldManager, "status")
}
-func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) {
- b := &PodSchedulingContextApplyConfiguration{}
- err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource)
+func extractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
+ b := &ResourceClaimApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaim"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
- b.WithName(podSchedulingContext.Name)
- b.WithNamespace(podSchedulingContext.Namespace)
+ b.WithName(resourceClaim.Name)
+ b.WithNamespace(resourceClaim.Namespace)
- b.WithKind("PodSchedulingContext")
- b.WithAPIVersion("resource.k8s.io/v1alpha3")
+ b.WithKind("ResourceClaim")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
return b, nil
}
// WithKind sets the Kind field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration {
- b.Kind = &value
+func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration {
- b.APIVersion = &value
+func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the GenerateName field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
// WithNamespace sets the Namespace field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Namespace field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
// WithUID sets the UID field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UID field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ResourceVersion field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
// WithGeneration sets the Generation field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Generation field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,13 +184,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Labels field,
// overwriting existing map entries in the Labels field with the same key.
-func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,13 +199,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]s
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the entries provided by each call will be put on the Annotations field,
// overwriting existing map entries in the Annotations field with the same key.
-func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -213,13 +213,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,15 +227,15 @@ func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Finalizers field.
-func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
-func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
}
@@ -244,7 +244,7 @@ func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurat
// WithSpec sets the Spec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Spec field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration {
b.Spec = value
return b
}
@@ -252,13 +252,13 @@ func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingCo
// WithStatus sets the Status field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Status field is set to the value of the last call.
-func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration {
+func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration {
b.Status = value
return b
}
// GetName retrieves the value of the Name field in the declarative configuration.
-func (b *PodSchedulingContextApplyConfiguration) GetName() *string {
+func (b *ResourceClaimApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go
new file mode 100644
index 000000000..f6eefdda5
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ types "k8s.io/apimachinery/pkg/types"
+)
+
+// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use
+// with apply.
+type ResourceClaimConsumerReferenceApplyConfiguration struct {
+ APIGroup *string `json:"apiGroup,omitempty"`
+ Resource *string `json:"resource,omitempty"`
+ Name *string `json:"name,omitempty"`
+ UID *types.UID `json:"uid,omitempty"`
+}
+
+// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with
+// apply.
+func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration {
+ return &ResourceClaimConsumerReferenceApplyConfiguration{}
+}
+
+// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIGroup field is set to the value of the last call.
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration {
+ b.APIGroup = &value
+ return b
+}
+
+// WithResource sets the Resource field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resource field is set to the value of the last call.
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration {
+ b.Resource = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration {
+ b.UID = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go
new file mode 100644
index 000000000..c6b1b0b4b
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
+// with apply.
+type ResourceClaimSpecApplyConfiguration struct {
+ Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
+}
+
+// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with
+// apply.
+func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration {
+ return &ResourceClaimSpecApplyConfiguration{}
+}
+
+// WithDevices sets the Devices field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Devices field is set to the value of the last call.
+func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration {
+ b.Devices = value
+ return b
+}
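The generated builders are designed to be chained. A minimal sketch of wiring a ResourceClaimSpec into a claim apply configuration follows; the ResourceClaim(name, namespace) constructor and the DeviceClaim/DeviceRequest builders are assumed to be generated elsewhere in this package (they are not part of this excerpt), and every concrete value is illustrative.

package example

import (
	"fmt"

	resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func buildClaim() {
	// Build a spec with a single device request. DeviceClaim()/DeviceRequest()
	// and their setters are assumed generated builders; the request name and
	// device class are illustrative.
	spec := resourcev1beta1.ResourceClaimSpec().
		WithDevices(resourcev1beta1.DeviceClaim().
			WithRequests(resourcev1beta1.DeviceRequest().
				WithName("gpu").
				WithDeviceClassName("gpu.example.com")))

	// Attach the spec to a claim; the ResourceClaim constructor signature is
	// assumed by analogy with ResourceClaimTemplate(name, namespace).
	claim := resourcev1beta1.ResourceClaim("gpu-claim", "default").
		WithLabels(map[string]string{"app": "example"}).
		WithSpec(spec)

	fmt.Println(*claim.GetName()) // prints "gpu-claim"
}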
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go
new file mode 100644
index 000000000..bb3db18be
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
+// with apply.
+type ResourceClaimStatusApplyConfiguration struct {
+ Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
+ Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
+}
+
+// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
+// apply.
+func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration {
+ return &ResourceClaimStatusApplyConfiguration{}
+}
+
+// WithAllocation sets the Allocation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Allocation field is set to the value of the last call.
+func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+ b.Allocation = value
+ return b
+}
+
+// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ReservedFor field.
+func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithReservedFor")
+ }
+ b.ReservedFor = append(b.ReservedFor, *values[i])
+ }
+ return b
+}
+
+// WithDevices adds the given value to the Devices field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Devices field.
+func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithDevices")
+ }
+ b.Devices = append(b.Devices, *values[i])
+ }
+ return b
+}
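Statuses are assembled the same way; the sketch below uses only builders shown in this excerpt (ResourceClaimStatus, WithReservedFor, and the ResourceClaimConsumerReference builder above), with an illustrative consumer name and UID.

package example

import (
	"fmt"

	types "k8s.io/apimachinery/pkg/types"
	resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func buildStatus() {
	// Reserve the claim for one (illustrative) pod consumer.
	status := resourcev1beta1.ResourceClaimStatus().
		WithReservedFor(resourcev1beta1.ResourceClaimConsumerReference().
			WithResource("pods").
			WithName("my-pod").
			WithUID(types.UID("c0ffee00-0000-0000-0000-000000000000")))

	fmt.Println(len(status.ReservedFor)) // 1
}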
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go
new file mode 100644
index 000000000..490ecf5e7
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go
@@ -0,0 +1,255 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
+// with apply.
+type ResourceClaimTemplateApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
+// apply.
+func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration {
+ b := &ResourceClaimTemplateApplyConfiguration{}
+ b.WithName(name)
+ b.WithNamespace(namespace)
+ b.WithKind("ResourceClaimTemplate")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b
+}
+
+// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
+// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceClaimTemplate must be an unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimTemplate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
+}
+
+// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
+}
+
+func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ b := &ResourceClaimTemplateApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaimTemplate"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceClaimTemplate.Name)
+ b.WithNamespace(resourceClaimTemplate.Namespace)
+
+ b.WithKind("ResourceClaimTemplate")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
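The Extract helpers above target an extract/modify-in-place/apply round trip. A hedged sketch follows; the typed clientset accessors (ResourceV1beta1().ResourceClaimTemplates(...).Get/Apply) are assumed from the corresponding generated client, which is not part of this diff, and the namespace, object name, and field manager are illustrative.

package example

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// patchTemplateLabels reads the live object, extracts the fields owned by our
// field manager, modifies them in place, and server-side applies the result.
func patchTemplateLabels(ctx context.Context, client kubernetes.Interface) {
	got, err := client.ResourceV1beta1().ResourceClaimTemplates("default").
		Get(ctx, "gpu-template", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}

	ac, err := resourcev1beta1.ExtractResourceClaimTemplate(got, "example-manager")
	if err != nil {
		log.Fatal(err)
	}
	ac.WithLabels(map[string]string{"tier": "gpu"})

	if _, err := client.ResourceV1beta1().ResourceClaimTemplates("default").
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true}); err != nil {
		log.Fatal(err)
	}
}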
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go
new file mode 100644
index 000000000..9df32360f
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go
@@ -0,0 +1,194 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use
+// with apply.
+type ResourceClaimTemplateSpecApplyConfiguration struct {
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with
+// apply.
+func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration {
+ return &ResourceClaimTemplateSpecApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go
new file mode 100644
index 000000000..33c155b52
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go
@@ -0,0 +1,57 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use
+// with apply.
+type ResourcePoolApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Generation *int64 `json:"generation,omitempty"`
+ ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
+}
+
+// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with
+// apply.
+func ResourcePool() *ResourcePoolApplyConfiguration {
+ return &ResourcePoolApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration {
+ b.Generation = &value
+ return b
+}
+
+// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceSliceCount field is set to the value of the last call.
+func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration {
+ b.ResourceSliceCount = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go
new file mode 100644
index 000000000..d169ad101
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go
@@ -0,0 +1,253 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
+// with apply.
+type ResourceSliceApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
+// apply.
+func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
+ b := &ResourceSliceApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ResourceSlice")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b
+}
+
+// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
+// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
+// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceSlice must be an unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
+// ExtractResourceSlice provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
+ return extractResourceSlice(resourceSlice, fieldManager, "")
+}
+
+// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractResourceSliceStatus(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
+ return extractResourceSlice(resourceSlice, fieldManager, "status")
+}
+
+func extractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
+ b := &ResourceSliceApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceSlice"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceSlice.Name)
+
+ b.WithKind("ResourceSlice")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *ResourceSliceApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go
new file mode 100644
index 000000000..75bbb53c8
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go
@@ -0,0 +1,93 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/core/v1"
+)
+
+// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use
+// with apply.
+type ResourceSliceSpecApplyConfiguration struct {
+ Driver *string `json:"driver,omitempty"`
+ Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
+ NodeName *string `json:"nodeName,omitempty"`
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ AllNodes *bool `json:"allNodes,omitempty"`
+ Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
+}
+
+// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with
+// apply.
+func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration {
+ return &ResourceSliceSpecApplyConfiguration{}
+}
+
+// WithDriver sets the Driver field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Driver field is set to the value of the last call.
+func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration {
+ b.Driver = &value
+ return b
+}
+
+// WithPool sets the Pool field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Pool field is set to the value of the last call.
+func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
+ b.Pool = value
+ return b
+}
+
+// WithNodeName sets the NodeName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeName field is set to the value of the last call.
+func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration {
+ b.NodeName = &value
+ return b
+}
+
+// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeSelector field is set to the value of the last call.
+func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
+ b.NodeSelector = value
+ return b
+}
+
+// WithAllNodes sets the AllNodes field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllNodes field is set to the value of the last call.
+func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration {
+ b.AllNodes = &value
+ return b
+}
+
+// WithDevices adds the given value to the Devices field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Devices field.
+func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithDevices")
+ }
+ b.Devices = append(b.Devices, *values[i])
+ }
+ return b
+}
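The two new files above add apply configurations for the resource.k8s.io/v1beta1 ResourceSlice type. A minimal sketch of how these builders chain into a server-side apply follows; it assumes the generated ResourceSlice(name) constructor (from the earlier part of resourceslice.go, not in this hunk) and the typed ResourceV1beta1().ResourceSlices().Apply client that ships with this client-go version, and the driver and node names are invented for illustration only.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyResourceSlice builds a declarative ResourceSlice configuration by
// chaining the generated "With" setters (each returns its receiver) and
// submits it with server-side apply. A real ResourceSlice also needs the
// pool field; it is omitted here to keep the sketch short.
func applyResourceSlice(ctx context.Context, cs kubernetes.Interface) error {
	slice := resourcev1beta1.ResourceSlice("node-1-gpu-pool"). // assumed generated constructor
		WithLabels(map[string]string{"example.com/managed-by": "npd-demo"}).
		WithSpec(resourcev1beta1.ResourceSliceSpec().
			WithDriver("gpu.example.com").
			WithNodeName("node-1").
			WithAllNodes(false))

	_, err := cs.ResourceV1beta1().ResourceSlices().
		Apply(ctx, slice, metav1.ApplyOptions{FieldManager: "npd-demo", Force: true})
	return err
}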
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
index f2f135abc..24f122cc0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
@@ -21,22 +21,22 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
// with apply.
type PriorityClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Value *int32 `json:"value,omitempty"`
- GlobalDefault *bool `json:"globalDefault,omitempty"`
- Description *string `json:"description,omitempty"`
- PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Value *int32 `json:"value,omitempty"`
+ GlobalDefault *bool `json:"globalDefault,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
}
// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
@@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManage
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,25 +150,25 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
+func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
+func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -214,13 +214,13 @@ func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]str
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration {
+func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,14 +231,14 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
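The recurring change in the regenerated setters above (b.Kind becoming b.TypeMetaApplyConfiguration.Kind, b.Name becoming b.ObjectMetaApplyConfiguration.Name, and so on) is purely about spelling out the embedded struct instead of relying on Go's field promotion; both forms address the same field, and the explicit form stays unambiguous even if the outer builder ever gains a field of the same name. The stand-in types below are not client-go's, just an illustration of that equivalence.

package main

import "fmt"

// typeMeta stands in for the generated TypeMetaApplyConfiguration.
type typeMeta struct {
	Kind *string
}

// builder stands in for a generated *ApplyConfiguration that embeds typeMeta,
// so Kind is promoted onto builder.
type builder struct {
	typeMeta
}

func main() {
	b := &builder{}
	k := "PriorityClass"

	b.Kind = &k          // promoted-field form used by the previous generated code
	b.typeMeta.Kind = &k // explicitly qualified form used by the regenerated code

	fmt.Println(b.Kind == b.typeMeta.Kind) // true: both names refer to one field
}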
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
index 098517675..37a50ef6a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
@@ -20,7 +20,7 @@ package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
- v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
return extractPriorityClass(priorityClass, fieldManager, "")
}
// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPriorityClassStatus(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClassStatus(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
return extractPriorityClass(priorityClass, fieldManager, "status")
}
-func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
+func extractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager st
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
index 075862fe3..4b6d52039 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
@@ -20,7 +20,7 @@ package v1beta1
import (
corev1 "k8s.io/api/core/v1"
- v1beta1 "k8s.io/api/scheduling/v1beta1"
+ schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
return extractPriorityClass(priorityClass, fieldManager, "")
}
// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractPriorityClassStatus(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClassStatus(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
return extractPriorityClass(priorityClass, fieldManager, "status")
}
-func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
+func extractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
@@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager str
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority
// If called multiple times, the Name field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC
// If called multiple times, the UID field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i
// overwriting an existing map entries in Labels field with the same key.
func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string)
// overwriting an existing map entries in Annotations field with the same key.
func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne
func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -277,5 +277,5 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *PriorityClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
index 39d835702..6941e4cdc 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apistoragev1 "k8s.io/api/storage/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use
// with apply.
type CSIDriverApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSIDriver constructs a declarative configuration of the CSIDriver type for use with
@@ -56,18 +56,18 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+func ExtractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
return extractCSIDriver(cSIDriver, fieldManager, "")
}
// ExtractCSIDriverStatus is the same as ExtractCSIDriver except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCSIDriverStatus(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+func ExtractCSIDriverStatus(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
return extractCSIDriver(cSIDriver, fieldManager, "status")
}
-func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
+func extractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
b := &CSIDriverApplyConfiguration{}
err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, su
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration {
+func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration {
+func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string)
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration {
+func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *CSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -249,5 +249,5 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSIDriverApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
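In the regenerated storage/v1 files, the import aliases are shuffled so that metav1 now names the applyconfigurations meta/v1 package while apismetav1 names apimachinery's meta/v1, but every exported signature is unchanged: WithCreationTimestamp and WithDeletionTimestamp still take apimachinery's Time. A short sketch of an unaffected caller (the driver name is invented; CSIDriver(name) and WithCreationTimestamp appear in the hunks above):

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storageapplyv1 "k8s.io/client-go/applyconfigurations/storage/v1"
)

func main() {
	// Callers keep passing apimachinery's metav1.Time; the alias rename is
	// file-local to the generated sources and does not ripple outward.
	_ = storageapplyv1.CSIDriver("ebs.csi.example.com").
		WithCreationTimestamp(metav1.NewTime(time.Now()))
}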
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
index b2dcb0fee..1b58c6db8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
)
// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
@@ -27,9 +27,9 @@ import (
type CSIDriverSpecApplyConfiguration struct {
AttachRequired *bool `json:"attachRequired,omitempty"`
PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
- VolumeLifecycleModes []v1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
+ VolumeLifecycleModes []storagev1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
StorageCapacity *bool `json:"storageCapacity,omitempty"`
- FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
+ FSGroupPolicy *storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
SELinuxMount *bool `json:"seLinuxMount,omitempty"`
@@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri
// WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field.
-func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration {
+func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration {
for i := range values {
b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i])
}
@@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr
// WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FSGroupPolicy field is set to the value of the last call.
-func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration {
+func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration {
b.FSGroupPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
index 8a53e7984..f31620709 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1
import (
- apistoragev1 "k8s.io/api/storage/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use
// with apply.
type CSINodeApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSINode constructs a declarative configuration of the CSINode type for use with
@@ -56,18 +56,18 @@ func CSINode(name string) *CSINodeApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCSINode(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+func ExtractCSINode(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
return extractCSINode(cSINode, fieldManager, "")
}
// ExtractCSINodeStatus is the same as ExtractCSINode except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCSINodeStatus(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+func ExtractCSINodeStatus(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
return extractCSINode(cSINode, fieldManager, "status")
}
-func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
+func extractCSINode(cSINode *storagev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
b := &CSINodeApplyConfiguration{}
err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource)
if err != nil {
@@ -84,7 +84,7 @@ func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresou
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,25 +146,25 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration {
+func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration {
+func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -210,13 +210,13 @@ func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration {
+func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,14 +227,14 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *CSINodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -249,5 +249,5 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSINodeApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
index 0e293248d..226fb1f70 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
@@ -21,22 +21,22 @@ package v1
import (
storagev1 "k8s.io/api/storage/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
// with apply.
type CSIStorageCapacityApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- Capacity *resource.Quantity `json:"capacity,omitempty"`
- MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ NodeTopology *metav1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ Capacity *resource.Quantity `json:"capacity,omitempty"`
+ MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
}
// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
@@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -152,25 +152,25 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
+func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
+func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,13 +216,13 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[strin
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration {
+func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -233,21 +233,21 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
// WithNodeTopology sets the NodeTopology field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NodeTopology field is set to the value of the last call.
-func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *v1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration {
+func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *metav1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration {
b.NodeTopology = value
return b
}
@@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
index 26d70bc8b..cab39900e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
@@ -21,26 +21,26 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use
// with apply.
type StorageClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Provisioner *string `json:"provisioner,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
- ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
- MountOptions []string `json:"mountOptions,omitempty"`
- AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
- VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
- AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Provisioner *string `json:"provisioner,omitempty"`
+ Parameters map[string]string `json:"parameters,omitempty"`
+ ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
+ MountOptions []string `json:"mountOptions,omitempty"`
+ AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
+ VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
+ AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
}
// StorageClass constructs a declarative configuration of the StorageClass type for use with
@@ -92,7 +92,7 @@ func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager stri
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -154,25 +154,25 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration {
+func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration {
+func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -218,13 +218,13 @@ func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]stri
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration {
+func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -235,14 +235,14 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *StorageClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -318,5 +318,5 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StorageClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
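Editor's note (illustrative only, not part of the vendored patch): the hunks above mostly qualify embedded-field access (b.Kind -> b.TypeMetaApplyConfiguration.Kind) and rename import aliases in the generated builders; caller-facing behavior is unchanged. A minimal sketch of how these generated "With" setters are typically used for server-side apply follows. The package name, the clientset wiring, and the "example-field-manager" string are assumptions for illustration.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// applyStorageClass builds a StorageClass apply configuration with the chained
// "With" setters and submits it via server-side apply.
func applyStorageClass(ctx context.Context, cs kubernetes.Interface) error {
	sc := storagev1ac.StorageClass("example-sc").
		WithProvisioner("kubernetes.io/no-provisioner").
		WithReclaimPolicy(corev1.PersistentVolumeReclaimDelete).
		WithVolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
		WithLabels(map[string]string{"example": "true"})

	// The field manager named here owns exactly the fields set above.
	_, err := cs.StorageV1().StorageClasses().Apply(ctx, sc,
		metav1.ApplyOptions{FieldManager: "example-field-manager", Force: true})
	return err
}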
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
index 72c351208..b28b8c33f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
@@ -19,21 +19,21 @@ limitations under the License.
package v1
import (
- apistoragev1 "k8s.io/api/storage/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
internal "k8s.io/client-go/applyconfigurations/internal"
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)
// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
// with apply.
type VolumeAttachmentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
- *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
}
// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
@@ -57,18 +57,18 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+func ExtractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
return extractVolumeAttachment(volumeAttachment, fieldManager, "")
}
// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractVolumeAttachmentStatus(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
}
-func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
+func extractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
b := &VolumeAttachmentApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fi
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum
// If called multiple times, the Name field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume
// If called multiple times, the UID field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,25 +147,25 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *
// If called multiple times, the Generation field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
-func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
+func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
-func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
+func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu
// overwriting an existing map entries in Labels field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -211,13 +211,13 @@ func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
-func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration {
+func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,14 +228,14 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O
func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
if b.ObjectMetaApplyConfiguration == nil {
- b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{}
}
}
@@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
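Editor's note (illustrative only, not part of the vendored patch): besides the alias renames, the VolumeAttachment file above shows the Extract* helpers and the new GetName accessor. A minimal sketch of the extract-then-reapply pattern follows; the "example-manager" field manager name is an assumption.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// reapplyAttachment extracts the fields a given manager owns on a live
// VolumeAttachment and re-applies them, without claiming any other fields.
func reapplyAttachment(ctx context.Context, cs kubernetes.Interface, name string) error {
	// Fetch the live object.
	va, err := cs.StorageV1().VolumeAttachments().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract only the fields currently owned by "example-manager".
	ac, err := storagev1ac.ExtractVolumeAttachment(va, "example-manager")
	if err != nil {
		return err
	}
	if n := ac.GetName(); n != nil {
		fmt.Printf("extracted apply configuration for %s\n", *n)
	}

	// Re-apply the extracted configuration; a no-op unless the object drifted.
	_, err = cs.StorageV1().VolumeAttachments().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}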
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
index 477855398..1c865c001 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/client-go/applyconfigurations/core/v1"
+ corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)
// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
// with apply.
type VolumeAttachmentSourceApplyConfiguration struct {
- PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
- InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
+ PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
+ InlineVolumeSpec *corev1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
}
// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
@@ -46,7 +46,7 @@ func (b *VolumeAttachmentSourceApplyConfiguration) WithPersistentVolumeName(valu
// WithInlineVolumeSpec sets the InlineVolumeSpec field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the InlineVolumeSpec field is set to the value of the last call.
-func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *v1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration {
+func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *corev1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration {
b.InlineVolumeSpec = value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
index 039e5f32b..c16c5c3af 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
// with apply.
type VolumeErrorApplyConfiguration struct {
- Time *v1.Time `json:"time,omitempty"`
- Message *string `json:"message,omitempty"`
+ Time *metav1.Time `json:"time,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
@@ -38,7 +38,7 @@ func VolumeError() *VolumeErrorApplyConfiguration {
// WithTime sets the Time field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Time field is set to the value of the last call.
-func (b *VolumeErrorApplyConfiguration) WithTime(value v1.Time) *VolumeErrorApplyConfiguration {
+func (b *VolumeErrorApplyConfiguration) WithTime(value metav1.Time) *VolumeErrorApplyConfiguration {
b.Time = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
index aa949e28c..518f7a7f6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
- v1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
@@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
}
// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
}
-func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
+func extractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
@@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
index 9648621ac..b66cf0094 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
@@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum
// If called multiple times, the Name field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume
// If called multiple times, the UID field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *
// If called multiple times, the Generation field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu
// overwriting an existing map entries in Labels field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O
func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
index f95bc5547..898726b62 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
- v1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -57,18 +57,18 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
}
// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
}
-func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
+func extractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
b := &VolumeAttributesClassApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttribut
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *
// If called multiple times, the Name field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string)
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V
// If called multiple times, the UID field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds
// overwriting an existing map entries in Labels field with the same key.
func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]
// overwriting an existing map entries in Annotations field with the same key.
func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...
func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -264,5 +264,5 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *VolumeAttributesClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
index b9a807bd8..0fe9421de 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
@@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverAppl
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64
// overwriting an existing map entries in Labels field with the same key.
func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef
func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSIDriverApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
index 5f4e068f0..e62fe5888 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
@@ -19,20 +19,20 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
)
// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
// with apply.
type CSIDriverSpecApplyConfiguration struct {
- AttachRequired *bool `json:"attachRequired,omitempty"`
- PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
- VolumeLifecycleModes []v1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
- StorageCapacity *bool `json:"storageCapacity,omitempty"`
- FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
- TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
- RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
- SELinuxMount *bool `json:"seLinuxMount,omitempty"`
+ AttachRequired *bool `json:"attachRequired,omitempty"`
+ PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
+ VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
+ StorageCapacity *bool `json:"storageCapacity,omitempty"`
+ FSGroupPolicy *storagev1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
+ TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
+ RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
+ SELinuxMount *bool `json:"seLinuxMount,omitempty"`
}
// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with
@@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri
// WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field.
-func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration {
+func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration {
for i := range values {
b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i])
}
@@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr
// WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the FSGroupPolicy field is set to the value of the last call.
-func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration {
+func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration {
b.FSGroupPolicy = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
index af0f41cf0..4e7ad8997 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
@@ -84,7 +84,7 @@ func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subres
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -146,7 +146,7 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -155,7 +155,7 @@ func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyCon
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -164,7 +164,7 @@ func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CS
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64)
// overwriting an existing map entries in Labels field with the same key.
func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -216,7 +216,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -227,7 +227,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer
func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -249,5 +249,5 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSINodeApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
index 19350e5a6..c8acaf923 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
@@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
}
// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
}
-func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
+func extractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
@@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, f
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI
// If called multiple times, the Name field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS
// If called multiple times, the UID field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string)
// If called multiple times, the Generation field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va
// overwriting an existing map entries in Labels field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str
// overwriting an existing map entries in Annotations field with the same key.
func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1
func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -279,5 +279,5 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *CSIStorageCapacityApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
index fa504a44e..2d211754e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
@@ -20,7 +20,7 @@ package v1beta1
import (
corev1 "k8s.io/api/core/v1"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -39,7 +39,7 @@ type StorageClassApplyConfiguration struct {
ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
MountOptions []string `json:"mountOptions,omitempty"`
AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
- VolumeBindingMode *v1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
+ VolumeBindingMode *storagev1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
}
@@ -64,18 +64,18 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+func ExtractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
return extractStorageClass(storageClass, fieldManager, "")
}
// ExtractStorageClassStatus is the same as ExtractStorageClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractStorageClassStatus(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+func ExtractStorageClassStatus(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
return extractStorageClass(storageClass, fieldManager, "status")
}
-func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
+func extractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
b := &StorageClassApplyConfiguration{}
err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource)
if err != nil {
@@ -92,7 +92,7 @@ func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl
// If called multiple times, the Name field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla
// If called multiple times, the UID field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -154,7 +154,7 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -163,7 +163,7 @@ func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageCla
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -172,7 +172,7 @@ func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in
// overwriting an existing map entries in Labels field with the same key.
func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *
// overwriting an existing map entries in Annotations field with the same key.
func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -224,7 +224,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -235,7 +235,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner
func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -297,7 +297,7 @@ func (b *StorageClassApplyConfiguration) WithAllowVolumeExpansion(value bool) *S
// WithVolumeBindingMode sets the VolumeBindingMode field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VolumeBindingMode field is set to the value of the last call.
-func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value v1beta1.VolumeBindingMode) *StorageClassApplyConfiguration {
+func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value storagev1beta1.VolumeBindingMode) *StorageClassApplyConfiguration {
b.VolumeBindingMode = &value
return b
}
@@ -318,5 +318,5 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StorageClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
index b0711d731..3f7110bf4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
@@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment,
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum
// If called multiple times, the Name field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume
// If called multiple times, the UID field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *
// If called multiple times, the Generation field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu
// overwriting an existing map entries in Labels field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O
func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *VolumeAttachmentApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
index 7b221d277..ab1bda330 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -57,18 +57,18 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
-func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
}
// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
// that it extracts the status subresource applied configuration.
// Experimental!
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
}
-func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
+func extractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
b := &VolumeAttributesClassApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource)
if err != nil {
@@ -85,7 +85,7 @@ func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttribute
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *
// If called multiple times, the Name field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string)
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V
// If called multiple times, the UID field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri
// If called multiple times, the Generation field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds
// overwriting an existing map entries in Labels field with the same key.
func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]
// overwriting an existing map entries in Annotations field with the same key.
func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...
func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -264,5 +264,5 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *VolumeAttributesClassApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
index dcdbc60c7..5ffd572ee 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
@@ -20,18 +20,18 @@ package v1alpha1
import (
v1 "k8s.io/api/core/v1"
- v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use
// with apply.
type MigrationConditionApplyConfiguration struct {
- Type *v1alpha1.MigrationConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ Type *storagemigrationv1alpha1.MigrationConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
}
// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with
@@ -43,7 +43,7 @@ func MigrationCondition() *MigrationConditionApplyConfiguration {
// WithType sets the Type field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Type field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithType(value v1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration {
+func (b *MigrationConditionApplyConfiguration) WithType(value storagemigrationv1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration {
b.Type = &value
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
index 7e6452a77..a6dbc13a5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
@@ -85,7 +85,7 @@ func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1a
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Kind field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *StorageVersionMigrationApplyConfiguration {
- b.Kind = &value
+ b.TypeMetaApplyConfiguration.Kind = &value
return b
}
@@ -93,7 +93,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *Stor
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the APIVersion field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) *StorageVersionMigrationApplyConfiguration {
- b.APIVersion = &value
+ b.TypeMetaApplyConfiguration.APIVersion = &value
return b
}
@@ -102,7 +102,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string)
// If called multiple times, the Name field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Name = &value
+ b.ObjectMetaApplyConfiguration.Name = &value
return b
}
@@ -111,7 +111,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *Stor
// If called multiple times, the GenerateName field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.GenerateName = &value
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
return b
}
@@ -120,7 +120,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value strin
// If called multiple times, the Namespace field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Namespace = &value
+ b.ObjectMetaApplyConfiguration.Namespace = &value
return b
}
@@ -129,7 +129,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string)
// If called multiple times, the UID field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.UID = &value
+ b.ObjectMetaApplyConfiguration.UID = &value
return b
}
@@ -138,7 +138,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *St
// If called multiple times, the ResourceVersion field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.ResourceVersion = &value
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
return b
}
@@ -147,7 +147,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value st
// If called multiple times, the Generation field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.Generation = &value
+ b.ObjectMetaApplyConfiguration.Generation = &value
return b
}
@@ -156,7 +156,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64)
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.CreationTimestamp = &value
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
return b
}
@@ -165,7 +165,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionTimestamp = &value
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
return b
}
@@ -174,7 +174,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- b.DeletionGracePeriodSeconds = &value
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
return b
}
@@ -184,11 +184,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSecon
// overwriting an existing map entries in Labels field with the same key.
func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Labels == nil && len(entries) > 0 {
- b.Labels = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Labels[k] = v
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
}
return b
}
@@ -199,11 +199,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[strin
// overwriting an existing map entries in Annotations field with the same key.
func (b *StorageVersionMigrationApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
- if b.Annotations == nil && len(entries) > 0 {
- b.Annotations = make(map[string]string, len(entries))
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
}
for k, v := range entries {
- b.Annotations[k] = v
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
}
return b
}
@@ -217,7 +217,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values .
if values[i] == nil {
panic("nil value passed to WithOwnerReferences")
}
- b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
}
return b
}
@@ -228,7 +228,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values .
func (b *StorageVersionMigrationApplyConfiguration) WithFinalizers(values ...string) *StorageVersionMigrationApplyConfiguration {
b.ensureObjectMetaApplyConfigurationExists()
for i := range values {
- b.Finalizers = append(b.Finalizers, values[i])
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
}
return b
}
@@ -258,5 +258,5 @@ func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVer
// GetName retrieves the value of the Name field in the declarative configuration.
func (b *StorageVersionMigrationApplyConfiguration) GetName() *string {
b.ensureObjectMetaApplyConfigurationExists()
- return b.Name
+ return b.ObjectMetaApplyConfiguration.Name
}
diff --git a/vendor/k8s.io/client-go/features/features.go b/vendor/k8s.io/client-go/features/features.go
index afb67f509..5ccdcc55f 100644
--- a/vendor/k8s.io/client-go/features/features.go
+++ b/vendor/k8s.io/client-go/features/features.go
@@ -18,9 +18,9 @@ package features
import (
"errors"
+ "sync/atomic"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "sync/atomic"
)
// NOTE: types Feature, FeatureSpec, prerelease (and its values)
diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go
index 0c972a46f..a74f6a833 100644
--- a/vendor/k8s.io/client-go/features/known_features.go
+++ b/vendor/k8s.io/client-go/features/known_features.go
@@ -28,6 +28,31 @@ const (
// of code conflicts because changes are more likely to be scattered
// across the file.
+ // owner: @benluddy
+ // kep: https://kep.k8s.io/4222
+ // alpha: 1.32
+ //
+ // If disabled, clients configured to accept "application/cbor" will instead accept
+ // "application/json" with the same relative preference, and clients configured to write
+ // "application/cbor" or "application/apply-patch+cbor" will instead write
+ // "application/json" or "application/apply-patch+yaml", respectively.
+ ClientsAllowCBOR Feature = "ClientsAllowCBOR"
+
+ // owner: @benluddy
+ // kep: https://kep.k8s.io/4222
+ // alpha: 1.32
+ //
+ // If enabled, and only if ClientsAllowCBOR is also enabled, the default request content
+ // type (if not explicitly configured) and the dynamic client's request content type both
+ // become "application/cbor" instead of "application/json". The default content type for
+ // apply patch requests becomes "application/apply-patch+cbor" instead of
+ // "application/apply-patch+yaml".
+ ClientsPreferCBOR Feature = "ClientsPreferCBOR"
+
+ // owner: @nilekhc
+ // alpha: v1.30
+ InformerResourceVersion Feature = "InformerResourceVersion"
+
// owner: @p0lyn0mial
// beta: v1.30
//
@@ -37,10 +62,6 @@ const (
// The feature is disabled in Beta by default because
// it will only be turned on for selected control plane component(s).
WatchListClient Feature = "WatchListClient"
-
- // owner: @nilekhc
- // alpha: v1.30
- InformerResourceVersion Feature = "InformerResourceVersion"
)
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
@@ -49,6 +70,8 @@ const (
// After registering with the binary, the features are, by default, controllable using environment variables.
// For more details, please see envVarFeatureGates implementation.
var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{
- WatchListClient: {Default: false, PreRelease: Beta},
+ ClientsAllowCBOR: {Default: false, PreRelease: Alpha},
+ ClientsPreferCBOR: {Default: false, PreRelease: Alpha},
InformerResourceVersion: {Default: false, PreRelease: Alpha},
+ WatchListClient: {Default: false, PreRelease: Beta},
}
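
Editorial note: both CBOR gates are alpha and default to false, so this bump does not change wire formats by itself. If a consumer wanted to inspect a client-go gate at runtime, the features package exposes a read-only view; a hedged sketch, assuming the FeatureGates()/Enabled API exported by this vendored package:

    package main

    import (
        "fmt"

        clientfeatures "k8s.io/client-go/features"
    )

    func main() {
        // Enabled reports the effective value after defaults and any
        // environment-variable overrides have been applied.
        if clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
            fmt.Println("WatchListClient is enabled")
        }
    }
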
diff --git a/vendor/k8s.io/client-go/gentype/fake.go b/vendor/k8s.io/client-go/gentype/fake.go
new file mode 100644
index 000000000..bcb9ca27f
--- /dev/null
+++ b/vendor/k8s.io/client-go/gentype/fake.go
@@ -0,0 +1,304 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gentype
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClient represents a fake client
+type FakeClient[T objectWithMeta] struct {
+ *testing.Fake
+ ns string
+ resource schema.GroupVersionResource
+ kind schema.GroupVersionKind
+ newObject func() T
+}
+
+// FakeClientWithList represents a fake client with support for lists.
+type FakeClientWithList[T objectWithMeta, L runtime.Object] struct {
+ *FakeClient[T]
+ alsoFakeLister[T, L]
+}
+
+// FakeClientWithApply represents a fake client with support for apply declarative configurations.
+type FakeClientWithApply[T objectWithMeta, C namedObject] struct {
+ *FakeClient[T]
+ alsoFakeApplier[T, C]
+}
+
+// FakeClientWithListAndApply represents a fake client with support for lists and apply declarative configurations.
+type FakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct {
+ *FakeClient[T]
+ alsoFakeLister[T, L]
+ alsoFakeApplier[T, C]
+}
+
+// Helper types for composition
+type alsoFakeLister[T objectWithMeta, L runtime.Object] struct {
+ client *FakeClient[T]
+ newList func() L
+ copyListMeta func(L, L)
+ getItems func(L) []T
+ setItems func(L, []T)
+}
+
+type alsoFakeApplier[T objectWithMeta, C namedObject] struct {
+ client *FakeClient[T]
+}
+
+// NewFakeClient constructs a fake client, namespaced or not, with no support for lists or apply.
+// Non-namespaced clients are constructed by passing an empty namespace ("").
+func NewFakeClient[T objectWithMeta](
+ fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T,
+) *FakeClient[T] {
+ return &FakeClient[T]{fake, namespace, resource, kind, emptyObjectCreator}
+}
+
+// NewFakeClientWithList constructs a namespaced client with support for lists.
+func NewFakeClientWithList[T objectWithMeta, L runtime.Object](
+ fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T,
+ emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T),
+) *FakeClientWithList[T, L] {
+ fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator)
+ return &FakeClientWithList[T, L]{
+ fakeClient,
+ alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter},
+ }
+}
+
+// NewFakeClientWithApply constructs a namespaced client with support for apply declarative configurations.
+func NewFakeClientWithApply[T objectWithMeta, C namedObject](
+ fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T,
+) *FakeClientWithApply[T, C] {
+ fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator)
+ return &FakeClientWithApply[T, C]{
+ fakeClient,
+ alsoFakeApplier[T, C]{fakeClient},
+ }
+}
+
+// NewFakeClientWithListAndApply constructs a client with support for lists and applying declarative configurations.
+func NewFakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject](
+ fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T,
+ emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T),
+) *FakeClientWithListAndApply[T, L, C] {
+ fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator)
+ return &FakeClientWithListAndApply[T, L, C]{
+ fakeClient,
+ alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter},
+ alsoFakeApplier[T, C]{fakeClient},
+ }
+}
+
+// Get takes name of a resource, and returns the corresponding object, and an error if there is any.
+func (c *FakeClient[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) {
+ emptyResult := c.newObject()
+
+ obj, err := c.Fake.
+ Invokes(testing.NewGetActionWithOptions(c.resource, c.ns, name, options), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+func ToPointerSlice[T any](src []T) []*T {
+ if src == nil {
+ return nil
+ }
+ result := make([]*T, len(src))
+ for i := range src {
+ result[i] = &src[i]
+ }
+ return result
+}
+
+func FromPointerSlice[T any](src []*T) []T {
+ if src == nil {
+ return nil
+ }
+ result := make([]T, len(src))
+ for i := range src {
+ result[i] = *src[i]
+ }
+ return result
+}
+
+// List takes label and field selectors, and returns the list of resources that match those selectors.
+func (l *alsoFakeLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (result L, err error) {
+ emptyResult := l.newList()
+ obj, err := l.client.Fake.
+ Invokes(testing.NewListActionWithOptions(l.client.resource, l.client.kind, l.client.ns, opts), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ // Everything matches
+ return obj.(L), nil
+ }
+ list := l.newList()
+ l.copyListMeta(list, obj.(L))
+ var items []T
+ for _, item := range l.getItems(obj.(L)) {
+ itemMeta, err := meta.Accessor(item)
+ if err != nil {
+ // No ObjectMeta, nothing can match
+ continue
+ }
+ if label.Matches(labels.Set(itemMeta.GetLabels())) {
+ items = append(items, item)
+ }
+ }
+ l.setItems(list, items)
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested resources.
+func (c *FakeClient[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchActionWithOptions(c.resource, c.ns, opts))
+}
+
+// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any.
+func (c *FakeClient[T]) Create(ctx context.Context, resource T, opts metav1.CreateOptions) (result T, err error) {
+ emptyResult := c.newObject()
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any.
+func (c *FakeClient[T]) Update(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) {
+ emptyResult := c.newObject()
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+// UpdateStatus updates the resource's status and returns the updated resource.
+func (c *FakeClient[T]) UpdateStatus(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) {
+ emptyResult := c.newObject()
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceActionWithOptions(c.resource, "status", c.ns, resource, opts), emptyResult)
+
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+// Delete deletes the resource matching the given name. Returns an error if one occurs.
+func (c *FakeClient[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(c.resource, c.ns, name, opts), c.newObject())
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (l *alsoFakeLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ _, err := l.client.Fake.
+ Invokes(testing.NewDeleteCollectionActionWithOptions(l.client.resource, l.client.ns, opts, listOpts), l.newList())
+ return err
+}
+
+// Patch applies the patch and returns the patched resource.
+func (c *FakeClient[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result T, err error) {
+ emptyResult := c.newObject()
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceActionWithOptions(c.resource, c.ns, name, pt, data, opts, subresources...), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied resource.
+func (a *alsoFakeApplier[T, C]) Apply(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) {
+ if configuration == *new(C) {
+ return *new(T), fmt.Errorf("configuration provided to Apply must not be nil")
+ }
+ data, err := json.Marshal(configuration)
+ if err != nil {
+ return *new(T), err
+ }
+ name := configuration.GetName()
+ if name == nil {
+ return *new(T), fmt.Errorf("configuration.Name must be provided to Apply")
+ }
+ emptyResult := a.client.newObject()
+ obj, err := a.client.Fake.
+ Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult)
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+// ApplyStatus applies the given apply declarative configuration to the resource's status and returns the updated resource.
+func (a *alsoFakeApplier[T, C]) ApplyStatus(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) {
+ if configuration == *new(C) {
+ return *new(T), fmt.Errorf("configuration provided to Apply must not be nil")
+ }
+ data, err := json.Marshal(configuration)
+ if err != nil {
+ return *new(T), err
+ }
+ name := configuration.GetName()
+ if name == nil {
+ return *new(T), fmt.Errorf("configuration.Name must be provided to Apply")
+ }
+ emptyResult := a.client.newObject()
+ obj, err := a.client.Fake.
+ Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult)
+
+ if obj == nil {
+ return emptyResult, err
+ }
+ return obj.(T), err
+}
+
+func (c *FakeClient[T]) Namespace() string {
+ return c.ns
+}
+
+func (c *FakeClient[T]) Kind() schema.GroupVersionKind {
+ return c.kind
+}
+
+func (c *FakeClient[T]) Resource() schema.GroupVersionResource {
+ return c.resource
+}
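
Editorial note: this new file gives the generated fake typed clients a shared, generic implementation of every verb instead of per-type boilerplate. The exported ToPointerSlice/FromPointerSlice helpers simply bridge between the []T item slices stored in list types and the []*T shape some generated callers expect; a small usage sketch (illustrative values, inside code that imports k8s.io/client-go/gentype):

    items := []string{"a", "b"}
    ptrs := gentype.ToPointerSlice(items)  // []*string; each element points at items[i]
    back := gentype.FromPointerSlice(ptrs) // []string{"a", "b"} again
    _ = back
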
diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go
index b5be84318..e6ed6aae7 100644
--- a/vendor/k8s.io/client-go/gentype/type.go
+++ b/vendor/k8s.io/client-go/gentype/type.go
@@ -18,7 +18,6 @@ package gentype
import (
"context"
- json "encoding/json"
"fmt"
"time"
@@ -27,6 +26,7 @@ import (
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
+ "k8s.io/client-go/util/apply"
"k8s.io/client-go/util/consistencydetector"
"k8s.io/client-go/util/watchlist"
"k8s.io/klog/v2"
@@ -51,6 +51,8 @@ type Client[T objectWithMeta] struct {
namespace string // "" for non-namespaced clients
newObject func() T
parameterCodec runtime.ParameterCodec
+
+ prefersProtobuf bool
}
// ClientWithList represents a client with support for lists.
@@ -82,26 +84,37 @@ type alsoApplier[T objectWithMeta, C namedObject] struct {
client *Client[T]
}
+type Option[T objectWithMeta] func(*Client[T])
+
+func PrefersProtobuf[T objectWithMeta]() Option[T] {
+ return func(c *Client[T]) { c.prefersProtobuf = true }
+}
+
// NewClient constructs a client, namespaced or not, with no support for lists or apply.
// Non-namespaced clients are constructed by passing an empty namespace ("").
func NewClient[T objectWithMeta](
resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
+ options ...Option[T],
) *Client[T] {
- return &Client[T]{
+ c := &Client[T]{
resource: resource,
client: client,
parameterCodec: parameterCodec,
namespace: namespace,
newObject: emptyObjectCreator,
}
+ for _, option := range options {
+ option(c)
+ }
+ return c
}
// NewClientWithList constructs a namespaced client with support for lists.
func NewClientWithList[T objectWithMeta, L runtime.Object](
resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
- emptyListCreator func() L,
+ emptyListCreator func() L, options ...Option[T],
) *ClientWithList[T, L] {
- typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
+ typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...)
return &ClientWithList[T, L]{
typeClient,
alsoLister[T, L]{typeClient, emptyListCreator},
@@ -111,8 +124,9 @@ func NewClientWithList[T objectWithMeta, L runtime.Object](
// NewClientWithApply constructs a namespaced client with support for apply declarative configurations.
func NewClientWithApply[T objectWithMeta, C namedObject](
resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
+ options ...Option[T],
) *ClientWithApply[T, C] {
- typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
+ typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...)
return &ClientWithApply[T, C]{
typeClient,
alsoApplier[T, C]{typeClient},
@@ -122,9 +136,9 @@ func NewClientWithApply[T objectWithMeta, C namedObject](
// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations.
func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject](
resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T,
- emptyListCreator func() L,
+ emptyListCreator func() L, options ...Option[T],
) *ClientWithListAndApply[T, L, C] {
- typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator)
+ typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...)
return &ClientWithListAndApply[T, L, C]{
typeClient,
alsoLister[T, L]{typeClient, emptyListCreator},
@@ -146,6 +160,7 @@ func (c *Client[T]) GetNamespace() string {
func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) {
result := c.newObject()
err := c.client.Get().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
Name(name).
@@ -181,6 +196,7 @@ func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
err := l.client.client.Get().
+ UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf).
NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
Resource(l.client.resource).
VersionedParams(&opts, l.client.parameterCodec).
@@ -198,6 +214,7 @@ func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOption
}
result = l.newList()
err = l.client.client.Get().
+ UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf).
NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
Resource(l.client.resource).
VersionedParams(&opts, l.client.parameterCodec).
@@ -215,6 +232,7 @@ func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I
}
opts.Watch = true
return c.client.Get().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
VersionedParams(&opts, c.parameterCodec).
@@ -226,6 +244,7 @@ func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.I
func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) {
result := c.newObject()
err := c.client.Post().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
VersionedParams(&opts, c.parameterCodec).
@@ -239,6 +258,7 @@ func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions
func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) {
result := c.newObject()
err := c.client.Put().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
Name(obj.GetName()).
@@ -253,6 +273,7 @@ func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions
func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) {
result := c.newObject()
err := c.client.Put().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
Name(obj.GetName()).
@@ -267,6 +288,7 @@ func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateO
// Delete takes name of the resource and deletes it. Returns an error if one occurs.
func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
Name(name).
@@ -282,6 +304,7 @@ func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.Del
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return l.client.client.Delete().
+ UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf).
NamespaceIfScoped(l.client.namespace, l.client.namespace != "").
Resource(l.client.resource).
VersionedParams(&listOpts, l.client.parameterCodec).
@@ -295,6 +318,7 @@ func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.Del
func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) {
result := c.newObject()
err := c.client.Patch(pt).
+ UseProtobufAsDefaultIfPreferred(c.prefersProtobuf).
NamespaceIfScoped(c.namespace, c.namespace != "").
Resource(c.resource).
Name(name).
@@ -313,19 +337,21 @@ func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyO
return *new(T), fmt.Errorf("object provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(obj)
- if err != nil {
- return *new(T), err
- }
if obj.GetName() == nil {
return *new(T), fmt.Errorf("obj.Name must be provided to Apply")
}
- err = a.client.client.Patch(types.ApplyPatchType).
+
+ request, err := apply.NewRequest(a.client.client, obj)
+ if err != nil {
+ return *new(T), err
+ }
+
+ err = request.
+ UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf).
NamespaceIfScoped(a.client.namespace, a.client.namespace != "").
Resource(a.client.resource).
Name(*obj.GetName()).
VersionedParams(&patchOpts, a.client.parameterCodec).
- Body(data).
Do(ctx).
Into(result)
return result, err
@@ -337,23 +363,24 @@ func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.
return *new(T), fmt.Errorf("object provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(obj)
- if err != nil {
- return *new(T), err
- }
if obj.GetName() == nil {
return *new(T), fmt.Errorf("obj.Name must be provided to Apply")
}
+ request, err := apply.NewRequest(a.client.client, obj)
+ if err != nil {
+ return *new(T), err
+ }
+
result := a.client.newObject()
- err = a.client.client.Patch(types.ApplyPatchType).
+ err = request.
+ UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf).
NamespaceIfScoped(a.client.namespace, a.client.namespace != "").
Resource(a.client.resource).
Name(*obj.GetName()).
SubResource("status").
VersionedParams(&patchOpts, a.client.parameterCodec).
- Body(data).
Do(ctx).
Into(result)
return result, err
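
Editorial note: two behavioural threads run through this file. Every request now calls UseProtobufAsDefaultIfPreferred, gated by the new PrefersProtobuf option, and Apply/ApplyStatus build their bodies through k8s.io/client-go/util/apply.NewRequest instead of marshalling JSON inline, which is what lets the CBOR gates above influence the apply content type. A hedged sketch of how a generated constructor might opt a typed client into protobuf; the function name and resource choice here are illustrative, not copied from generated code:

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/gentype"
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/rest"
    )

    // newPodClient is an illustrative constructor, not generated code.
    func newPodClient(restClient rest.Interface, ns string) *gentype.ClientWithList[*corev1.Pod, *corev1.PodList] {
        return gentype.NewClientWithList[*corev1.Pod, *corev1.PodList](
            "pods", restClient, scheme.ParameterCodec, ns,
            func() *corev1.Pod { return &corev1.Pod{} },
            func() *corev1.PodList { return &corev1.PodList{} },
            gentype.PrefersProtobuf[*corev1.Pod](), // new variadic option; protobuf stays off when omitted
        )
    }
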
diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go
index 9cddb0bbe..a6dbc23a9 100644
--- a/vendor/k8s.io/client-go/kubernetes/clientset.go
+++ b/vendor/k8s.io/client-go/kubernetes/clientset.go
@@ -19,8 +19,8 @@ limitations under the License.
package kubernetes
import (
- "fmt"
- "net/http"
+ fmt "fmt"
+ http "net/http"
discovery "k8s.io/client-go/discovery"
admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
@@ -45,7 +45,7 @@ import (
certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1"
certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
- coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1"
+ coordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2"
coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1"
@@ -69,6 +69,7 @@ import (
rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
+ resourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1"
schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1"
schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
@@ -103,7 +104,7 @@ type Interface interface {
CertificatesV1() certificatesv1.CertificatesV1Interface
CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface
CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface
- CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface
+ CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface
CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface
CoordinationV1() coordinationv1.CoordinationV1Interface
CoreV1() corev1.CoreV1Interface
@@ -127,6 +128,7 @@ type Interface interface {
RbacV1() rbacv1.RbacV1Interface
RbacV1beta1() rbacv1beta1.RbacV1beta1Interface
RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface
+ ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface
ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface
SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface
SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface
@@ -161,7 +163,7 @@ type Clientset struct {
certificatesV1 *certificatesv1.CertificatesV1Client
certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client
certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client
- coordinationV1alpha1 *coordinationv1alpha1.CoordinationV1alpha1Client
+ coordinationV1alpha2 *coordinationv1alpha2.CoordinationV1alpha2Client
coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client
coordinationV1 *coordinationv1.CoordinationV1Client
coreV1 *corev1.CoreV1Client
@@ -185,6 +187,7 @@ type Clientset struct {
rbacV1 *rbacv1.RbacV1Client
rbacV1beta1 *rbacv1beta1.RbacV1beta1Client
rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client
+ resourceV1beta1 *resourcev1beta1.ResourceV1beta1Client
resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client
schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client
schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client
@@ -300,9 +303,9 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al
return c.certificatesV1alpha1
}
-// CoordinationV1alpha1 retrieves the CoordinationV1alpha1Client
-func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface {
- return c.coordinationV1alpha1
+// CoordinationV1alpha2 retrieves the CoordinationV1alpha2Client
+func (c *Clientset) CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface {
+ return c.coordinationV1alpha2
}
// CoordinationV1beta1 retrieves the CoordinationV1beta1Client
@@ -420,6 +423,11 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
return c.rbacV1alpha1
}
+// ResourceV1beta1 retrieves the ResourceV1beta1Client
+func (c *Clientset) ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface {
+ return c.resourceV1beta1
+}
+
// ResourceV1alpha3 retrieves the ResourceV1alpha3Client
func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface {
return c.resourceV1alpha3
@@ -588,7 +596,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
- cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ cs.coordinationV1alpha2, err = coordinationv1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
@@ -684,6 +692,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
+ cs.resourceV1beta1, err = resourcev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -758,7 +770,7 @@ func New(c rest.Interface) *Clientset {
cs.certificatesV1 = certificatesv1.New(c)
cs.certificatesV1beta1 = certificatesv1beta1.New(c)
cs.certificatesV1alpha1 = certificatesv1alpha1.New(c)
- cs.coordinationV1alpha1 = coordinationv1alpha1.New(c)
+ cs.coordinationV1alpha2 = coordinationv1alpha2.New(c)
cs.coordinationV1beta1 = coordinationv1beta1.New(c)
cs.coordinationV1 = coordinationv1.New(c)
cs.coreV1 = corev1.New(c)
@@ -782,6 +794,7 @@ func New(c rest.Interface) *Clientset {
cs.rbacV1 = rbacv1.New(c)
cs.rbacV1beta1 = rbacv1beta1.New(c)
cs.rbacV1alpha1 = rbacv1alpha1.New(c)
+ cs.resourceV1beta1 = resourcev1beta1.New(c)
cs.resourceV1alpha3 = resourcev1alpha3.New(c)
cs.schedulingV1alpha1 = schedulingv1alpha1.New(c)
cs.schedulingV1beta1 = schedulingv1beta1.New(c)
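Reviewer note, not part of the vendored diff: a minimal consumer-side sketch of what the regenerated Clientset now exposes. It only touches the two accessors visible in the hunks above, CoordinationV1alpha2() and ResourceV1beta1(); the kubeconfig path is illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a reachable kubeconfig; the path below is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Both group clients come straight from the updated clientset.go above:
	// coordination/v1alpha1 is gone in favor of v1alpha2, and resource/v1beta1 is new.
	fmt.Printf("%T\n", cs.CoordinationV1alpha2())
	fmt.Printf("%T\n", cs.ResourceV1beta1())
}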
diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
index 5262b0f04..a9a5d8eb7 100644
--- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go
+++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go
@@ -41,7 +41,7 @@ import (
certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
- coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1"
+ coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
discoveryv1 "k8s.io/api/discovery/v1"
@@ -65,6 +65,7 @@ import (
rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
@@ -104,7 +105,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
certificatesv1.AddToScheme,
certificatesv1beta1.AddToScheme,
certificatesv1alpha1.AddToScheme,
- coordinationv1alpha1.AddToScheme,
+ coordinationv1alpha2.AddToScheme,
coordinationv1beta1.AddToScheme,
coordinationv1.AddToScheme,
corev1.AddToScheme,
@@ -128,6 +129,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
rbacv1.AddToScheme,
rbacv1beta1.AddToScheme,
rbacv1alpha1.AddToScheme,
+ resourcev1beta1.AddToScheme,
resourcev1alpha3.AddToScheme,
schedulingv1alpha1.AddToScheme,
schedulingv1beta1.AddToScheme,
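Reviewer note, a standalone sketch of how the scheme wiring above is consumed when building a custom runtime.Scheme; it uses only the two AddToScheme entries added or swapped in register.go.

package main

import (
	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Mirrors the new localSchemeBuilder entries: coordination/v1alpha2 replaces
	// v1alpha1, and resource/v1beta1 is registered alongside v1alpha3.
	utilruntime.Must(coordinationv1alpha2.AddToScheme(scheme))
	utilruntime.Must(resourcev1beta1.AddToScheme(scheme))
}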
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go
index a81b2b682..74d2967f6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := admissionregistrationv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
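Reviewer note, an illustrative sketch rather than code from the diff: the serializer change above means a hand-built rest.Config for this group would now be wired through rest.CodecFactoryForGeneratedClient, exactly as the regenerated setConfigDefaults does; scheme.Scheme and scheme.Codecs come from k8s.io/client-go/kubernetes/scheme.

package configexample

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

func defaultedConfig(base *rest.Config) *rest.Config {
	cfg := rest.CopyConfig(base)
	gv := admissionregistrationv1.SchemeGroupVersion
	cfg.GroupVersion = &gv
	cfg.APIPath = "/apis"
	// Same codec factory the regenerated setConfigDefaults now uses instead of
	// scheme.Codecs.WithoutConversion().
	cfg.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if cfg.UserAgent == "" {
		cfg.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return cfg
}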
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
index e863766c6..d46a3c987 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
+ applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,38 @@ type MutatingWebhookConfigurationsGetter interface {
// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
type MutatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error)
- Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error)
+ Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error)
+ Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.MutatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error)
- Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error)
MutatingWebhookConfigurationExpansion
}
// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
type mutatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, *admissionregistrationv1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]
}
// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations {
return &mutatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, *admissionregistrationv1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration](
"mutatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} },
- func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }),
+ func() *admissionregistrationv1.MutatingWebhookConfiguration {
+ return &admissionregistrationv1.MutatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1.MutatingWebhookConfigurationList {
+ return &admissionregistrationv1.MutatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1.MutatingWebhookConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
index 1b20e6960..2d56ab168 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
+ applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface {
// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources.
type ValidatingAdmissionPolicyInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error)
- Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error)
+ Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error)
+ Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error)
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicy, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error)
- Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error)
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error)
ValidatingAdmissionPolicyExpansion
}
// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
type validatingAdmissionPolicies struct {
- *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]
}
// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies {
return &validatingAdmissionPolicies{
- gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration](
"validatingadmissionpolicies",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} },
- func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }),
+ func() *admissionregistrationv1.ValidatingAdmissionPolicy {
+ return &admissionregistrationv1.ValidatingAdmissionPolicy{}
+ },
+ func() *admissionregistrationv1.ValidatingAdmissionPolicyList {
+ return &admissionregistrationv1.ValidatingAdmissionPolicyList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicy](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
index 44694b232..d3eaa0d2d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
+ applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface {
// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources.
type ValidatingAdmissionPolicyBindingInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicyBinding, error)
- Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicyBinding, error)
+ Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicyBinding, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyBindingList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBindingList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error)
- Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error)
ValidatingAdmissionPolicyBindingExpansion
}
// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
type validatingAdmissionPolicyBindings struct {
- *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]
}
// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings {
return &validatingAdmissionPolicyBindings{
- gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration](
"validatingadmissionpolicybindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} },
- func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }),
+ func() *admissionregistrationv1.ValidatingAdmissionPolicyBinding {
+ return &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}
+ },
+ func() *admissionregistrationv1.ValidatingAdmissionPolicyBindingList {
+ return &admissionregistrationv1.ValidatingAdmissionPolicyBindingList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicyBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
index 11b4ac059..f8f60f681 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/admissionregistration/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
+ applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface {
// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
type ValidatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error)
- Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error)
+ Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error)
+ Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error)
- Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error)
ValidatingWebhookConfigurationExpansion
}
// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
type validatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, *admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]
}
// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations {
return &validatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, *admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration](
"validatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} },
- func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }),
+ func() *admissionregistrationv1.ValidatingWebhookConfiguration {
+ return &admissionregistrationv1.ValidatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1.ValidatingWebhookConfigurationList {
+ return &admissionregistrationv1.ValidatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingWebhookConfiguration](),
+ ),
}
}
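Reviewer note: despite the alias churn above (v1 becomes admissionregistrationv1, the apply-configuration import gains an applyconfigurations prefix) and the new gentype.PrefersProtobuf option, the public method set of the typed interfaces is unchanged, so existing callers keep compiling. A sketch of a typical call, assuming a working rest.Config:

package webhookexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listWebhooks(cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Method and option types match MutatingWebhookConfigurationInterface above;
	// only the concrete return types are spelled with the new alias.
	list, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(list.Items))
	return nil
}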
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
index f6102d25a..f8a67c6d8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
@@ -19,15 +19,17 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type AdmissionregistrationV1alpha1Interface interface {
RESTClient() rest.Interface
+ MutatingAdmissionPoliciesGetter
+ MutatingAdmissionPolicyBindingsGetter
ValidatingAdmissionPoliciesGetter
ValidatingAdmissionPolicyBindingsGetter
}
@@ -37,6 +39,14 @@ type AdmissionregistrationV1alpha1Client struct {
restClient rest.Interface
}
+func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface {
+ return newMutatingAdmissionPolicies(c)
+}
+
+func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface {
+ return newMutatingAdmissionPolicyBindings(c)
+}
+
func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface {
return newValidatingAdmissionPolicies(c)
}
@@ -90,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := admissionregistrationv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
index 94562da59..676578c63 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
@@ -18,6 +18,10 @@ limitations under the License.
package v1alpha1
+type MutatingAdmissionPolicyExpansion interface{}
+
+type MutatingAdmissionPolicyBindingExpansion interface{}
+
type ValidatingAdmissionPolicyExpansion interface{}
type ValidatingAdmissionPolicyBindingExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
new file mode 100644
index 000000000..4a781a602
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ context "context"
+
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// MutatingAdmissionPoliciesGetter has a method to return a MutatingAdmissionPolicyInterface.
+// A group's client should implement this interface.
+type MutatingAdmissionPoliciesGetter interface {
+ MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface
+}
+
+// MutatingAdmissionPolicyInterface has methods to work with MutatingAdmissionPolicy resources.
+type MutatingAdmissionPolicyInterface interface {
+ Create(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error)
+ Update(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error)
+ Apply(ctx context.Context, mutatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error)
+ MutatingAdmissionPolicyExpansion
+}
+
+// mutatingAdmissionPolicies implements MutatingAdmissionPolicyInterface
+type mutatingAdmissionPolicies struct {
+ *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration]
+}
+
+// newMutatingAdmissionPolicies returns a MutatingAdmissionPolicies
+func newMutatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicies {
+ return &mutatingAdmissionPolicies{
+ gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration](
+ "mutatingadmissionpolicies",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *admissionregistrationv1alpha1.MutatingAdmissionPolicy {
+ return &admissionregistrationv1alpha1.MutatingAdmissionPolicy{}
+ },
+ func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyList {
+ return &admissionregistrationv1alpha1.MutatingAdmissionPolicyList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicy](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
new file mode 100644
index 000000000..78057e200
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ context "context"
+
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// MutatingAdmissionPolicyBindingsGetter has a method to return a MutatingAdmissionPolicyBindingInterface.
+// A group's client should implement this interface.
+type MutatingAdmissionPolicyBindingsGetter interface {
+ MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface
+}
+
+// MutatingAdmissionPolicyBindingInterface has methods to work with MutatingAdmissionPolicyBinding resources.
+type MutatingAdmissionPolicyBindingInterface interface {
+ Create(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, mutatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error)
+ MutatingAdmissionPolicyBindingExpansion
+}
+
+// mutatingAdmissionPolicyBindings implements MutatingAdmissionPolicyBindingInterface
+type mutatingAdmissionPolicyBindings struct {
+ *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration]
+}
+
+// newMutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindings
+func newMutatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicyBindings {
+ return &mutatingAdmissionPolicyBindings{
+ gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration](
+ "mutatingadmissionpolicybindings",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding {
+ return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}
+ },
+ func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList {
+ return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding](),
+ ),
+ }
+}
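Reviewer note, a minimal sketch assuming a working rest.Config and an API server that actually serves the alpha group: the two brand-new typed clients added above are reached through the existing AdmissionregistrationV1alpha1() group client, and the getters and List signature are the ones declared in the new files.

package admissionexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listMutatingAdmissionPolicies(cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	ac := cs.AdmissionregistrationV1alpha1()
	// Both getters were added to AdmissionregistrationV1alpha1Interface in this bump.
	if _, err := ac.MutatingAdmissionPolicies().List(context.TODO(), metav1.ListOptions{}); err != nil {
		return err
	}
	_, err = ac.MutatingAdmissionPolicyBindings().List(context.TODO(), metav1.ListOptions{})
	return err
}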
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
index c2b7c825c..ce2328b12 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
+ applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface {
// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources.
type ValidatingAdmissionPolicyInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
- Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
+ Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error)
+ Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
- Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error)
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error)
ValidatingAdmissionPolicyExpansion
}
// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
type validatingAdmissionPolicies struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]
}
// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies {
return &validatingAdmissionPolicies{
- gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration](
"validatingadmissionpolicies",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} },
- func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }),
+ func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicy {
+ return &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}
+ },
+ func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList {
+ return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
index d8d0796ea..6236ea90c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
+ applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,34 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface {
// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources.
type ValidatingAdmissionPolicyBindingInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error)
- Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error)
+ Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error)
- Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error)
ValidatingAdmissionPolicyBindingExpansion
}
// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
type validatingAdmissionPolicyBindings struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]
}
// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings {
return &validatingAdmissionPolicyBindings{
- gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration](
"validatingadmissionpolicybindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} },
- func() *v1alpha1.ValidatingAdmissionPolicyBindingList {
- return &v1alpha1.ValidatingAdmissionPolicyBindingList{}
- }),
+ func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding {
+ return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}
+ },
+ func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList {
+ return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
index 5a0a17d9b..16c42b0ec 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := admissionregistrationv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 7a5bc8b9b..17e3541cc 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,38 @@ type MutatingWebhookConfigurationsGetter interface {
// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
type MutatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
- Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+ Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
+ Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.MutatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error)
- Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error)
MutatingWebhookConfigurationExpansion
}
// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
type mutatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]
}
// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations
func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations {
return &mutatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration](
"mutatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} },
- func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }),
+ func() *admissionregistrationv1beta1.MutatingWebhookConfiguration {
+ return &admissionregistrationv1beta1.MutatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1beta1.MutatingWebhookConfigurationList {
+ return &admissionregistrationv1beta1.MutatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.MutatingWebhookConfiguration](),
+ ),
}
}
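The alias churn in these regenerated files ("context" becoming context "context", the API group types renamed to admissionregistrationv1beta1, the apply configurations to applyconfigurationsadmissionregistrationv1beta1) is internal to the generated client: the exported interfaces still reference the same k8s.io/api types, so existing callers compile unchanged. A minimal consumer-side sketch, not part of this patch, assuming a reachable cluster and a kubeconfig at the default location:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at ~/.kube/config; any rest.Config works the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The typed surface is identical before and after the regeneration;
	// only the generated client's internal import aliases changed.
	list, err := cs.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("mutating webhook configurations:", len(list.Items))
}
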
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
index 0023d8837..2c663ba1e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface {
// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources.
type ValidatingAdmissionPolicyInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
- Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error)
+ Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error)
- Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error)
ValidatingAdmissionPolicyExpansion
}
// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
type validatingAdmissionPolicies struct {
- *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]
}
// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies {
return &validatingAdmissionPolicies{
- gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration](
"validatingadmissionpolicies",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} },
- func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }),
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicy {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}
+ },
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyList {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicyList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicy](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
index 8168d8cbc..196cc8f0a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,34 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface {
// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources.
type ValidatingAdmissionPolicyBindingInterface interface {
- Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
- Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
- Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error)
ValidatingAdmissionPolicyBindingExpansion
}
// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
type validatingAdmissionPolicyBindings struct {
- *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]
}
// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings {
return &validatingAdmissionPolicyBindings{
- gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration](
"validatingadmissionpolicybindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} },
- func() *v1beta1.ValidatingAdmissionPolicyBindingList {
- return &v1beta1.ValidatingAdmissionPolicyBindingList{}
- }),
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}
+ },
+ func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList {
+ return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index 5abd96823..9f28346e8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,34 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface {
// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
type ValidatingWebhookConfigurationInterface interface {
- Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
- Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+ Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
+ Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error)
- Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error)
+ Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error)
ValidatingWebhookConfigurationExpansion
}
// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
type validatingWebhookConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]
}
// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations
func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations {
return &validatingWebhookConfigurations{
- gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration](
"validatingwebhookconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} },
- func() *v1beta1.ValidatingWebhookConfigurationList {
- return &v1beta1.ValidatingWebhookConfigurationList{}
- }),
+ func() *admissionregistrationv1beta1.ValidatingWebhookConfiguration {
+ return &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}
+ },
+ func() *admissionregistrationv1beta1.ValidatingWebhookConfigurationList {
+ return &admissionregistrationv1beta1.ValidatingWebhookConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingWebhookConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
index 1794cb941..b76fadf91 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *InternalV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := apiserverinternalv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
index 436593f7f..cea897b3d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
+ apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1"
+ applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,40 @@ type StorageVersionsGetter interface {
// StorageVersionInterface has methods to work with StorageVersion resources.
type StorageVersionInterface interface {
- Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error)
- Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error)
+ Create(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.CreateOptions) (*apiserverinternalv1alpha1.StorageVersion, error)
+ Update(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error)
+ UpdateStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*apiserverinternalv1alpha1.StorageVersion, error)
+ List(ctx context.Context, opts v1.ListOptions) (*apiserverinternalv1alpha1.StorageVersionList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error)
- Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiserverinternalv1alpha1.StorageVersion, err error)
+ Apply(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error)
+ ApplyStatus(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error)
StorageVersionExpansion
}
// storageVersions implements StorageVersionInterface
type storageVersions struct {
- *gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]
+ *gentype.ClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration]
}
// newStorageVersions returns a StorageVersions
func newStorageVersions(c *InternalV1alpha1Client) *storageVersions {
return &storageVersions{
- gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration](
+ gentype.NewClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration](
"storageversions",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} },
- func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }),
+ func() *apiserverinternalv1alpha1.StorageVersion { return &apiserverinternalv1alpha1.StorageVersion{} },
+ func() *apiserverinternalv1alpha1.StorageVersionList {
+ return &apiserverinternalv1alpha1.StorageVersionList{}
+ },
+ gentype.PrefersProtobuf[*apiserverinternalv1alpha1.StorageVersion](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
index 397542eeb..cb0bf87ba 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/apps/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ appsv1 "k8s.io/api/apps/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := appsv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
index 252f47ba2..8bf810810 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface {
// ControllerRevisionInterface has methods to work with ControllerRevision resources.
type ControllerRevisionInterface interface {
- Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error)
- Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error)
+ Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.CreateOptions) (*appsv1.ControllerRevision, error)
+ Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.UpdateOptions) (*appsv1.ControllerRevision, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ControllerRevision, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ControllerRevisionList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error)
- Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error)
+ Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ControllerRevision, err error)
ControllerRevisionExpansion
}
// controllerRevisions implements ControllerRevisionInterface
type controllerRevisions struct {
- *gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration]
}
// newControllerRevisions returns a ControllerRevisions
func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions {
return &controllerRevisions{
- gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration](
"controllerrevisions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ControllerRevision { return &v1.ControllerRevision{} },
- func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }),
+ func() *appsv1.ControllerRevision { return &appsv1.ControllerRevision{} },
+ func() *appsv1.ControllerRevisionList { return &appsv1.ControllerRevisionList{} },
+ gentype.PrefersProtobuf[*appsv1.ControllerRevision](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
index 28917a796..6354da219 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DaemonSetsGetter interface {
// DaemonSetInterface has methods to work with DaemonSet resources.
type DaemonSetInterface interface {
- Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error)
- Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+ Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.CreateOptions) (*appsv1.DaemonSet, error)
+ Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+ UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.DaemonSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DaemonSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error)
- Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error)
+ Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error)
+ ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error)
DaemonSetExpansion
}
// daemonSets implements DaemonSetInterface
type daemonSets struct {
- *gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration]
}
// newDaemonSets returns a DaemonSets
func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets {
return &daemonSets{
- gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration](
"daemonsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.DaemonSet { return &v1.DaemonSet{} },
- func() *v1.DaemonSetList { return &v1.DaemonSetList{} }),
+ func() *appsv1.DaemonSet { return &appsv1.DaemonSet{} },
+ func() *appsv1.DaemonSetList { return &appsv1.DaemonSetList{} },
+ gentype.PrefersProtobuf[*appsv1.DaemonSet](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
index 871d51cfe..cc06ccf3a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// DeploymentsGetter has a method to return a DeploymentInterface.
@@ -42,19 +42,19 @@ type DeploymentsGetter interface {
// DeploymentInterface has methods to work with Deployment resources.
type DeploymentInterface interface {
- Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error)
- Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
+ Create(ctx context.Context, deployment *appsv1.Deployment, opts metav1.CreateOptions) (*appsv1.Deployment, error)
+ Update(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
+ UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.Deployment, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DeploymentList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error)
- Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error)
+ Apply(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error)
GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
@@ -64,19 +64,21 @@ type DeploymentInterface interface {
// deployments implements DeploymentInterface
type deployments struct {
- *gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration]
}
// newDeployments returns a Deployments
func newDeployments(c *AppsV1Client, namespace string) *deployments {
return &deployments{
- gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration](
"deployments",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Deployment { return &v1.Deployment{} },
- func() *v1.DeploymentList { return &v1.DeploymentList{} }),
+ func() *appsv1.Deployment { return &appsv1.Deployment{} },
+ func() *appsv1.DeploymentList { return &appsv1.DeploymentList{} },
+ gentype.PrefersProtobuf[*appsv1.Deployment](),
+ ),
}
}
@@ -84,6 +86,7 @@ func newDeployments(c *AppsV1Client, namespace string) *deployments {
func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
@@ -98,6 +101,7 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio
func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
@@ -116,19 +120,19 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
result = &autoscalingv1.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
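Beyond the renames, the deployments client now registers gentype.PrefersProtobuf and the scale helpers call UseProtobufAsDefault(), while ApplyScale builds its request through apply.NewRequest instead of json.Marshal plus a raw apply patch. None of this changes the public call sites; a hedged consumer sketch (the deployment name and namespace below are illustrative, not from this patch):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment bumps an assumed Deployment "web" in "default" to three
// replicas via the scale subresource; the protobuf preference added above
// is negotiated inside the client and is invisible at this call site.
func scaleDeployment(ctx context.Context, cs kubernetes.Interface) error {
	scale, err := cs.AppsV1().Deployments("default").GetScale(ctx, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = 3
	_, err = cs.AppsV1().Deployments("default").UpdateScale(ctx, "web", scale, metav1.UpdateOptions{})
	return err
}
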
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
index d6dec016b..db0fed952 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
@@ -42,19 +42,19 @@ type ReplicaSetsGetter interface {
// ReplicaSetInterface has methods to work with ReplicaSet resources.
type ReplicaSetInterface interface {
- Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error)
- Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
+ Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.CreateOptions) (*appsv1.ReplicaSet, error)
+ Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
+ UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ReplicaSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ReplicaSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error)
- Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error)
+ Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error)
+ ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error)
GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
@@ -64,19 +64,21 @@ type ReplicaSetInterface interface {
// replicaSets implements ReplicaSetInterface
type replicaSets struct {
- *gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration]
}
// newReplicaSets returns a ReplicaSets
func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets {
return &replicaSets{
- gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration](
"replicasets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ReplicaSet { return &v1.ReplicaSet{} },
- func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }),
+ func() *appsv1.ReplicaSet { return &appsv1.ReplicaSet{} },
+ func() *appsv1.ReplicaSetList { return &appsv1.ReplicaSetList{} },
+ gentype.PrefersProtobuf[*appsv1.ReplicaSet](),
+ ),
}
}
@@ -84,6 +86,7 @@ func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets {
func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
@@ -98,6 +101,7 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio
func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
@@ -116,19 +120,19 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
result = &autoscalingv1.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
index b25ed0723..e52cc6159 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
@@ -19,19 +19,19 @@ limitations under the License.
package v1
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1 "k8s.io/api/apps/v1"
+ appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// StatefulSetsGetter has a method to return a StatefulSetInterface.
@@ -42,19 +42,19 @@ type StatefulSetsGetter interface {
// StatefulSetInterface has methods to work with StatefulSet resources.
type StatefulSetInterface interface {
- Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error)
- Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
+ Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.CreateOptions) (*appsv1.StatefulSet, error)
+ Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
+ UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.StatefulSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*appsv1.StatefulSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error)
- Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error)
+ Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
+ ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error)
GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
@@ -64,19 +64,21 @@ type StatefulSetInterface interface {
// statefulSets implements StatefulSetInterface
type statefulSets struct {
- *gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration]
}
// newStatefulSets returns a StatefulSets
func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets {
return &statefulSets{
- gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration](
"statefulsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.StatefulSet { return &v1.StatefulSet{} },
- func() *v1.StatefulSetList { return &v1.StatefulSetList{} }),
+ func() *appsv1.StatefulSet { return &appsv1.StatefulSet{} },
+ func() *appsv1.StatefulSetList { return &appsv1.StatefulSetList{} },
+ gentype.PrefersProtobuf[*appsv1.StatefulSet](),
+ ),
}
}
@@ -84,6 +86,7 @@ func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets {
func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
@@ -98,6 +101,7 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt
func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
@@ -116,19 +120,19 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
result = &autoscalingv1.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
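The same ApplyScale rewrite appears for Deployments, ReplicaSets, and StatefulSets: apply.NewRequest now serializes the ScaleApplyConfiguration on the caller's behalf, so the calling convention is unchanged. A sketch of server-side apply against the scale subresource, assuming an existing clientset; the resource names and field manager below are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"
	"k8s.io/client-go/kubernetes"
)

// applyStatefulSetScale declares a desired replica count for an assumed
// StatefulSet "db" in "default" using server-side apply on the scale
// subresource.
func applyStatefulSetScale(ctx context.Context, cs kubernetes.Interface) error {
	scale := autoscalingv1ac.Scale().
		WithSpec(autoscalingv1ac.ScaleSpec().WithReplicas(5))
	_, err := cs.AppsV1().StatefulSets("default").ApplyScale(ctx, "db", scale,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
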
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
index 6b7148c5a..72bde633b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/apps/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -95,10 +95,10 @@ func New(c rest.Interface) *AppsV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := appsv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
index 185f7cc4e..1bd92695b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
+ applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface {
// ControllerRevisionInterface has methods to work with ControllerRevision resources.
type ControllerRevisionInterface interface {
- Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error)
- Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error)
+ Create(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.CreateOptions) (*appsv1beta1.ControllerRevision, error)
+ Update(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta1.ControllerRevision, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.ControllerRevision, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.ControllerRevisionList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error)
- Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.ControllerRevision, err error)
+ Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.ControllerRevision, err error)
ControllerRevisionExpansion
}
// controllerRevisions implements ControllerRevisionInterface
type controllerRevisions struct {
- *gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration]
}
// newControllerRevisions returns a ControllerRevisions
func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions {
return &controllerRevisions{
- gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration](
"controllerrevisions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} },
- func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }),
+ func() *appsv1beta1.ControllerRevision { return &appsv1beta1.ControllerRevision{} },
+ func() *appsv1beta1.ControllerRevisionList { return &appsv1beta1.ControllerRevisionList{} },
+ gentype.PrefersProtobuf[*appsv1beta1.ControllerRevision](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
index 06e4b7bf9..e01dd5a2f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
+ applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DeploymentsGetter interface {
// DeploymentInterface has methods to work with Deployment resources.
type DeploymentInterface interface {
- Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
- Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+ Create(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.CreateOptions) (*appsv1beta1.Deployment, error)
+ Update(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+ UpdateStatus(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.Deployment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.DeploymentList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
- Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.Deployment, err error)
+ Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error)
DeploymentExpansion
}
// deployments implements DeploymentInterface
type deployments struct {
- *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration]
}
// newDeployments returns a Deployments
func newDeployments(c *AppsV1beta1Client, namespace string) *deployments {
return &deployments{
- gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration](
"deployments",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Deployment { return &v1beta1.Deployment{} },
- func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }),
+ func() *appsv1beta1.Deployment { return &appsv1beta1.Deployment{} },
+ func() *appsv1beta1.DeploymentList { return &appsv1beta1.DeploymentList{} },
+ gentype.PrefersProtobuf[*appsv1beta1.Deployment](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
index 1ff69eb99..b88acdeb0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
+ applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type StatefulSetsGetter interface {
// StatefulSetInterface has methods to work with StatefulSet resources.
type StatefulSetInterface interface {
- Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error)
- Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
+ Create(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.CreateOptions) (*appsv1beta1.StatefulSet, error)
+ Update(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
+ UpdateStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.StatefulSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.StatefulSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error)
- Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.StatefulSet, err error)
+ Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error)
+ ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error)
StatefulSetExpansion
}
// statefulSets implements StatefulSetInterface
type statefulSets struct {
- *gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration]
}
// newStatefulSets returns a StatefulSets
func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets {
return &statefulSets{
- gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration](
"statefulsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} },
- func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }),
+ func() *appsv1beta1.StatefulSet { return &appsv1beta1.StatefulSet{} },
+ func() *appsv1beta1.StatefulSetList { return &appsv1beta1.StatefulSetList{} },
+ gentype.PrefersProtobuf[*appsv1beta1.StatefulSet](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
index 968abc56f..e13d12a76 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta2
import (
- "net/http"
+ http "net/http"
- v1beta2 "k8s.io/api/apps/v1beta2"
- "k8s.io/client-go/kubernetes/scheme"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1beta2Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta2.SchemeGroupVersion
+ gv := appsv1beta2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
index 6caee6a72..a170805b3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
+ applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ControllerRevisionsGetter interface {
// ControllerRevisionInterface has methods to work with ControllerRevision resources.
type ControllerRevisionInterface interface {
- Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error)
- Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error)
+ Create(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.CreateOptions) (*appsv1beta2.ControllerRevision, error)
+ Update(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta2.ControllerRevision, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ControllerRevision, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ControllerRevisionList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error)
- Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ControllerRevision, err error)
+ Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ControllerRevision, err error)
ControllerRevisionExpansion
}
// controllerRevisions implements ControllerRevisionInterface
type controllerRevisions struct {
- *gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration]
}
// newControllerRevisions returns a ControllerRevisions
func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions {
return &controllerRevisions{
- gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration](
"controllerrevisions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} },
- func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }),
+ func() *appsv1beta2.ControllerRevision { return &appsv1beta2.ControllerRevision{} },
+ func() *appsv1beta2.ControllerRevisionList { return &appsv1beta2.ControllerRevisionList{} },
+ gentype.PrefersProtobuf[*appsv1beta2.ControllerRevision](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
index 766dc6d43..f078121b0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
+ applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DaemonSetsGetter interface {
// DaemonSetInterface has methods to work with DaemonSet resources.
type DaemonSetInterface interface {
- Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error)
- Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
+ Create(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.CreateOptions) (*appsv1beta2.DaemonSet, error)
+ Update(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
+ UpdateStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.DaemonSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DaemonSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error)
- Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.DaemonSet, err error)
+ Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error)
+ ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error)
DaemonSetExpansion
}
// daemonSets implements DaemonSetInterface
type daemonSets struct {
- *gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration]
}
// newDaemonSets returns a DaemonSets
func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets {
return &daemonSets{
- gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration](
"daemonsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} },
- func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }),
+ func() *appsv1beta2.DaemonSet { return &appsv1beta2.DaemonSet{} },
+ func() *appsv1beta2.DaemonSetList { return &appsv1beta2.DaemonSetList{} },
+ gentype.PrefersProtobuf[*appsv1beta2.DaemonSet](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
index 6592ee8cd..1be57edb2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
+ applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DeploymentsGetter interface {
// DeploymentInterface has methods to work with Deployment resources.
type DeploymentInterface interface {
- Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error)
- Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
+ Create(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.CreateOptions) (*appsv1beta2.Deployment, error)
+ Update(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
+ UpdateStatus(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.Deployment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DeploymentList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error)
- Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.Deployment, err error)
+ Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error)
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error)
DeploymentExpansion
}
// deployments implements DeploymentInterface
type deployments struct {
- *gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration]
}
// newDeployments returns a Deployments
func newDeployments(c *AppsV1beta2Client, namespace string) *deployments {
return &deployments{
- gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration](
"deployments",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta2.Deployment { return &v1beta2.Deployment{} },
- func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }),
+ func() *appsv1beta2.Deployment { return &appsv1beta2.Deployment{} },
+ func() *appsv1beta2.DeploymentList { return &appsv1beta2.DeploymentList{} },
+ gentype.PrefersProtobuf[*appsv1beta2.Deployment](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
index 90380ca98..12bac0923 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
+ applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type ReplicaSetsGetter interface {
// ReplicaSetInterface has methods to work with ReplicaSet resources.
type ReplicaSetInterface interface {
- Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error)
- Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
+ Create(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.CreateOptions) (*appsv1beta2.ReplicaSet, error)
+ Update(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
+ UpdateStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ReplicaSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ReplicaSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error)
- Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ReplicaSet, err error)
+ Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error)
+ ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error)
ReplicaSetExpansion
}
// replicaSets implements ReplicaSetInterface
type replicaSets struct {
- *gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration]
}
// newReplicaSets returns a ReplicaSets
func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets {
return &replicaSets{
- gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration](
"replicasets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} },
- func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }),
+ func() *appsv1beta2.ReplicaSet { return &appsv1beta2.ReplicaSet{} },
+ func() *appsv1beta2.ReplicaSetList { return &appsv1beta2.ReplicaSetList{} },
+ gentype.PrefersProtobuf[*appsv1beta2.ReplicaSet](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
index f2d673abb..c71e93494 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1beta2
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1beta2 "k8s.io/api/apps/v1beta2"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
+ applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// StatefulSetsGetter has a method to return a StatefulSetInterface.
@@ -40,48 +40,51 @@ type StatefulSetsGetter interface {
// StatefulSetInterface has methods to work with StatefulSet resources.
type StatefulSetInterface interface {
- Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error)
- Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
+ Create(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.CreateOptions) (*appsv1beta2.StatefulSet, error)
+ Update(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
+ UpdateStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.StatefulSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.StatefulSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error)
- Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.StatefulSet, err error)
+ Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error)
- GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
- UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error)
- ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta2.Scale, error)
+ ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error)
+ GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*appsv1beta2.Scale, error)
+ UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (*appsv1beta2.Scale, error)
+ ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*appsv1beta2.Scale, error)
StatefulSetExpansion
}
// statefulSets implements StatefulSetInterface
type statefulSets struct {
- *gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration]
}
// newStatefulSets returns a StatefulSets
func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets {
return &statefulSets{
- gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration](
"statefulsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} },
- func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }),
+ func() *appsv1beta2.StatefulSet { return &appsv1beta2.StatefulSet{} },
+ func() *appsv1beta2.StatefulSetList { return &appsv1beta2.StatefulSetList{} },
+ gentype.PrefersProtobuf[*appsv1beta2.StatefulSet](),
+ ),
}
}
-// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any.
-func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
- result = &v1beta2.Scale{}
+// GetScale takes name of the statefulSet, and returns the corresponding appsv1beta2.Scale object, and an error if there is any.
+func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *appsv1beta2.Scale, err error) {
+ result = &appsv1beta2.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
@@ -93,9 +96,10 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt
}
// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) {
- result = &v1beta2.Scale{}
+func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (result *appsv1beta2.Scale, err error) {
+ result = &appsv1beta2.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
@@ -109,24 +113,24 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string,
// ApplyScale takes top resource name and the apply declarative configuration for scale,
// applies it and returns the applied scale, and an error, if there is any.
-func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) {
+func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Scale, err error) {
if scale == nil {
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
- result = &v1beta2.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ result = &appsv1beta2.Scale{}
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("statefulsets").
Name(statefulSetName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
index 81be8b2e0..bd5df7798 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/authentication/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := authenticationv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
index 720dd9e7e..9113b6a6d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authentication/v1"
+ authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface {
// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources.
type SelfSubjectReviewInterface interface {
- Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error)
+ Create(ctx context.Context, selfSubjectReview *authenticationv1.SelfSubjectReview, opts metav1.CreateOptions) (*authenticationv1.SelfSubjectReview, error)
SelfSubjectReviewExpansion
}
// selfSubjectReviews implements SelfSubjectReviewInterface
type selfSubjectReviews struct {
- *gentype.Client[*v1.SelfSubjectReview]
+ *gentype.Client[*authenticationv1.SelfSubjectReview]
}
// newSelfSubjectReviews returns a SelfSubjectReviews
func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews {
return &selfSubjectReviews{
- gentype.NewClient[*v1.SelfSubjectReview](
+ gentype.NewClient[*authenticationv1.SelfSubjectReview](
"selfsubjectreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }),
+ func() *authenticationv1.SelfSubjectReview { return &authenticationv1.SelfSubjectReview{} },
+ gentype.PrefersProtobuf[*authenticationv1.SelfSubjectReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
index 52c55fab0..ce8b62d1b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authentication/v1"
+ authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type TokenReviewsGetter interface {
// TokenReviewInterface has methods to work with TokenReview resources.
type TokenReviewInterface interface {
- Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error)
+ Create(ctx context.Context, tokenReview *authenticationv1.TokenReview, opts metav1.CreateOptions) (*authenticationv1.TokenReview, error)
TokenReviewExpansion
}
// tokenReviews implements TokenReviewInterface
type tokenReviews struct {
- *gentype.Client[*v1.TokenReview]
+ *gentype.Client[*authenticationv1.TokenReview]
}
// newTokenReviews returns a TokenReviews
func newTokenReviews(c *AuthenticationV1Client) *tokenReviews {
return &tokenReviews{
- gentype.NewClient[*v1.TokenReview](
+ gentype.NewClient[*authenticationv1.TokenReview](
"tokenreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.TokenReview { return &v1.TokenReview{} }),
+ func() *authenticationv1.TokenReview { return &authenticationv1.TokenReview{} },
+ gentype.PrefersProtobuf[*authenticationv1.TokenReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go
index 187392661..821265859 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/authentication/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *AuthenticationV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := authenticationv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
index f034bcdbe..8d5b176f7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/authentication/v1alpha1"
+ authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface {
// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources.
type SelfSubjectReviewInterface interface {
- Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*v1alpha1.SelfSubjectReview, error)
+ Create(ctx context.Context, selfSubjectReview *authenticationv1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1alpha1.SelfSubjectReview, error)
SelfSubjectReviewExpansion
}
// selfSubjectReviews implements SelfSubjectReviewInterface
type selfSubjectReviews struct {
- *gentype.Client[*v1alpha1.SelfSubjectReview]
+ *gentype.Client[*authenticationv1alpha1.SelfSubjectReview]
}
// newSelfSubjectReviews returns a SelfSubjectReviews
func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews {
return &selfSubjectReviews{
- gentype.NewClient[*v1alpha1.SelfSubjectReview](
+ gentype.NewClient[*authenticationv1alpha1.SelfSubjectReview](
"selfsubjectreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }),
+ func() *authenticationv1alpha1.SelfSubjectReview { return &authenticationv1alpha1.SelfSubjectReview{} },
+ gentype.PrefersProtobuf[*authenticationv1alpha1.SelfSubjectReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
index 7823729e0..7b22e46e3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/authentication/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := authenticationv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
index d083ba8fa..e29f81451 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authentication/v1beta1"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SelfSubjectReviewsGetter interface {
// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources.
type SelfSubjectReviewInterface interface {
- Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error)
+ Create(ctx context.Context, selfSubjectReview *authenticationv1beta1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1beta1.SelfSubjectReview, error)
SelfSubjectReviewExpansion
}
// selfSubjectReviews implements SelfSubjectReviewInterface
type selfSubjectReviews struct {
- *gentype.Client[*v1beta1.SelfSubjectReview]
+ *gentype.Client[*authenticationv1beta1.SelfSubjectReview]
}
// newSelfSubjectReviews returns a SelfSubjectReviews
func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews {
return &selfSubjectReviews{
- gentype.NewClient[*v1beta1.SelfSubjectReview](
+ gentype.NewClient[*authenticationv1beta1.SelfSubjectReview](
"selfsubjectreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }),
+ func() *authenticationv1beta1.SelfSubjectReview { return &authenticationv1beta1.SelfSubjectReview{} },
+ gentype.PrefersProtobuf[*authenticationv1beta1.SelfSubjectReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
index 982534935..5e1e002be 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authentication/v1beta1"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type TokenReviewsGetter interface {
// TokenReviewInterface has methods to work with TokenReview resources.
type TokenReviewInterface interface {
- Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error)
+ Create(ctx context.Context, tokenReview *authenticationv1beta1.TokenReview, opts v1.CreateOptions) (*authenticationv1beta1.TokenReview, error)
TokenReviewExpansion
}
// tokenReviews implements TokenReviewInterface
type tokenReviews struct {
- *gentype.Client[*v1beta1.TokenReview]
+ *gentype.Client[*authenticationv1beta1.TokenReview]
}
// newTokenReviews returns a TokenReviews
func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews {
return &tokenReviews{
- gentype.NewClient[*v1beta1.TokenReview](
+ gentype.NewClient[*authenticationv1beta1.TokenReview](
"tokenreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }),
+ func() *authenticationv1beta1.TokenReview { return &authenticationv1beta1.TokenReview{} },
+ gentype.PrefersProtobuf[*authenticationv1beta1.TokenReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
index edfc90346..71fb89b38 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/authorization/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := authorizationv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
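Note: the setConfigDefaults change only swaps the codec factory used internally by the generated constructors; consumers still build the client from a rest.Config as before. A hedged sketch, assuming the usual kubeconfig path; the claim that an explicitly set ContentType overrides the generated protobuf preference is my reading, not something this diff states:

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// newClientset builds a typed clientset from a kubeconfig file. With client-go
// v0.32 the generated clients prefer protobuf on the wire for built-in types;
// setting ContentType/AcceptContentTypes pins the format explicitly instead
// (assumption: an explicit setting wins over the generated preference).
func newClientset(kubeconfig string) (*kubernetes.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	// Optional: force JSON, e.g. when inspecting traffic through a debugging proxy.
	cfg.ContentType = "application/json"
	cfg.AcceptContentTypes = "application/json"
	return kubernetes.NewForConfig(cfg)
}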
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
index 3d058941a..24327e87f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authorization/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type LocalSubjectAccessReviewsGetter interface {
// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
type LocalSubjectAccessReviewInterface interface {
- Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error)
+ Create(ctx context.Context, localSubjectAccessReview *authorizationv1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.LocalSubjectAccessReview, error)
LocalSubjectAccessReviewExpansion
}
// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
type localSubjectAccessReviews struct {
- *gentype.Client[*v1.LocalSubjectAccessReview]
+ *gentype.Client[*authorizationv1.LocalSubjectAccessReview]
}
// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews {
return &localSubjectAccessReviews{
- gentype.NewClient[*v1.LocalSubjectAccessReview](
+ gentype.NewClient[*authorizationv1.LocalSubjectAccessReview](
"localsubjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }),
+ func() *authorizationv1.LocalSubjectAccessReview { return &authorizationv1.LocalSubjectAccessReview{} },
+ gentype.PrefersProtobuf[*authorizationv1.LocalSubjectAccessReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
index 9e874bee5..014faeffb 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authorization/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SelfSubjectAccessReviewsGetter interface {
// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
type SelfSubjectAccessReviewInterface interface {
- Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error)
+ Create(ctx context.Context, selfSubjectAccessReview *authorizationv1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectAccessReview, error)
SelfSubjectAccessReviewExpansion
}
// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
type selfSubjectAccessReviews struct {
- *gentype.Client[*v1.SelfSubjectAccessReview]
+ *gentype.Client[*authorizationv1.SelfSubjectAccessReview]
}
// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews {
return &selfSubjectAccessReviews{
- gentype.NewClient[*v1.SelfSubjectAccessReview](
+ gentype.NewClient[*authorizationv1.SelfSubjectAccessReview](
"selfsubjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }),
+ func() *authorizationv1.SelfSubjectAccessReview { return &authorizationv1.SelfSubjectAccessReview{} },
+ gentype.PrefersProtobuf[*authorizationv1.SelfSubjectAccessReview](),
+ ),
}
}
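Note: SelfSubjectAccessReview is another create-only resource, so the rename does not change anything callers see. A small sketch of the usual "can I?" probe through the clientset (the resource attributes are illustrative; node-problem-detector needs patch on nodes/status to set conditions):

package example

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canPatchNodeStatus checks whether the current credentials may patch nodes/status.
func canPatchNodeStatus(ctx context.Context, clientset kubernetes.Interface) (bool, error) {
	ssar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:        "patch",
				Resource:    "nodes",
				Subresource: "status",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	fmt.Printf("allowed=%v reason=%q\n", resp.Status.Allowed, resp.Status.Reason)
	return resp.Status.Allowed, nil
}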
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
index 567b63ec4..a14b2d7d5 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authorization/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SelfSubjectRulesReviewsGetter interface {
// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
type SelfSubjectRulesReviewInterface interface {
- Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error)
+ Create(ctx context.Context, selfSubjectRulesReview *authorizationv1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectRulesReview, error)
SelfSubjectRulesReviewExpansion
}
// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
type selfSubjectRulesReviews struct {
- *gentype.Client[*v1.SelfSubjectRulesReview]
+ *gentype.Client[*authorizationv1.SelfSubjectRulesReview]
}
// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews {
return &selfSubjectRulesReviews{
- gentype.NewClient[*v1.SelfSubjectRulesReview](
+ gentype.NewClient[*authorizationv1.SelfSubjectRulesReview](
"selfsubjectrulesreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }),
+ func() *authorizationv1.SelfSubjectRulesReview { return &authorizationv1.SelfSubjectRulesReview{} },
+ gentype.PrefersProtobuf[*authorizationv1.SelfSubjectRulesReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
index 52e8d74e5..bdc9955ad 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/authorization/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SubjectAccessReviewsGetter interface {
// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
type SubjectAccessReviewInterface interface {
- Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error)
+ Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error)
SubjectAccessReviewExpansion
}
// subjectAccessReviews implements SubjectAccessReviewInterface
type subjectAccessReviews struct {
- *gentype.Client[*v1.SubjectAccessReview]
+ *gentype.Client[*authorizationv1.SubjectAccessReview]
}
// newSubjectAccessReviews returns a SubjectAccessReviews
func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews {
return &subjectAccessReviews{
- gentype.NewClient[*v1.SubjectAccessReview](
+ gentype.NewClient[*authorizationv1.SubjectAccessReview](
"subjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }),
+ func() *authorizationv1.SubjectAccessReview { return &authorizationv1.SubjectAccessReview{} },
+ gentype.PrefersProtobuf[*authorizationv1.SubjectAccessReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
index 23b0edf27..f33619eb3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/authorization/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := authorizationv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
index 302c094b3..8dcc984f7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authorization/v1beta1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,27 @@ type LocalSubjectAccessReviewsGetter interface {
// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
type LocalSubjectAccessReviewInterface interface {
- Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error)
+ Create(ctx context.Context, localSubjectAccessReview *authorizationv1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.LocalSubjectAccessReview, error)
LocalSubjectAccessReviewExpansion
}
// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
type localSubjectAccessReviews struct {
- *gentype.Client[*v1beta1.LocalSubjectAccessReview]
+ *gentype.Client[*authorizationv1beta1.LocalSubjectAccessReview]
}
// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews {
return &localSubjectAccessReviews{
- gentype.NewClient[*v1beta1.LocalSubjectAccessReview](
+ gentype.NewClient[*authorizationv1beta1.LocalSubjectAccessReview](
"localsubjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }),
+ func() *authorizationv1beta1.LocalSubjectAccessReview {
+ return &authorizationv1beta1.LocalSubjectAccessReview{}
+ },
+ gentype.PrefersProtobuf[*authorizationv1beta1.LocalSubjectAccessReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
index 4b413dc4f..b1f111f3f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authorization/v1beta1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,27 @@ type SelfSubjectAccessReviewsGetter interface {
// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
type SelfSubjectAccessReviewInterface interface {
- Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error)
+ Create(ctx context.Context, selfSubjectAccessReview *authorizationv1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectAccessReview, error)
SelfSubjectAccessReviewExpansion
}
// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
type selfSubjectAccessReviews struct {
- *gentype.Client[*v1beta1.SelfSubjectAccessReview]
+ *gentype.Client[*authorizationv1beta1.SelfSubjectAccessReview]
}
// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews {
return &selfSubjectAccessReviews{
- gentype.NewClient[*v1beta1.SelfSubjectAccessReview](
+ gentype.NewClient[*authorizationv1beta1.SelfSubjectAccessReview](
"selfsubjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }),
+ func() *authorizationv1beta1.SelfSubjectAccessReview {
+ return &authorizationv1beta1.SelfSubjectAccessReview{}
+ },
+ gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectAccessReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
index b64cec301..11a11b8e6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authorization/v1beta1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,27 @@ type SelfSubjectRulesReviewsGetter interface {
// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
type SelfSubjectRulesReviewInterface interface {
- Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error)
+ Create(ctx context.Context, selfSubjectRulesReview *authorizationv1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectRulesReview, error)
SelfSubjectRulesReviewExpansion
}
// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
type selfSubjectRulesReviews struct {
- *gentype.Client[*v1beta1.SelfSubjectRulesReview]
+ *gentype.Client[*authorizationv1beta1.SelfSubjectRulesReview]
}
// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews
func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews {
return &selfSubjectRulesReviews{
- gentype.NewClient[*v1beta1.SelfSubjectRulesReview](
+ gentype.NewClient[*authorizationv1beta1.SelfSubjectRulesReview](
"selfsubjectrulesreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }),
+ func() *authorizationv1beta1.SelfSubjectRulesReview {
+ return &authorizationv1beta1.SelfSubjectRulesReview{}
+ },
+ gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectRulesReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
index 3fca833a1..b62537521 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
@@ -19,9 +19,9 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/authorization/v1beta1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
@@ -35,23 +35,25 @@ type SubjectAccessReviewsGetter interface {
// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
type SubjectAccessReviewInterface interface {
- Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error)
+ Create(ctx context.Context, subjectAccessReview *authorizationv1beta1.SubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SubjectAccessReview, error)
SubjectAccessReviewExpansion
}
// subjectAccessReviews implements SubjectAccessReviewInterface
type subjectAccessReviews struct {
- *gentype.Client[*v1beta1.SubjectAccessReview]
+ *gentype.Client[*authorizationv1beta1.SubjectAccessReview]
}
// newSubjectAccessReviews returns a SubjectAccessReviews
func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews {
return &subjectAccessReviews{
- gentype.NewClient[*v1beta1.SubjectAccessReview](
+ gentype.NewClient[*authorizationv1beta1.SubjectAccessReview](
"subjectaccessreviews",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }),
+ func() *authorizationv1beta1.SubjectAccessReview { return &authorizationv1beta1.SubjectAccessReview{} },
+ gentype.PrefersProtobuf[*authorizationv1beta1.SubjectAccessReview](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
index f3a2752cb..6ceaaf82a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/autoscaling/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := autoscalingv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
index 4d29ac522..c5c69b7c6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/autoscaling/v1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
+ applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type HorizontalPodAutoscalersGetter interface {
// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
type HorizontalPodAutoscalerInterface interface {
- Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error)
- Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
+ Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error)
+ Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
+ UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*autoscalingv1.HorizontalPodAutoscaler, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*autoscalingv1.HorizontalPodAutoscalerList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error)
- Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error)
+ Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error)
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error)
HorizontalPodAutoscalerExpansion
}
// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
type horizontalPodAutoscalers struct {
- *gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]
+ *gentype.ClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration]
}
// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers {
return &horizontalPodAutoscalers{
- gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration](
+ gentype.NewClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration](
"horizontalpodautoscalers",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} },
- func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }),
+ func() *autoscalingv1.HorizontalPodAutoscaler { return &autoscalingv1.HorizontalPodAutoscaler{} },
+ func() *autoscalingv1.HorizontalPodAutoscalerList { return &autoscalingv1.HorizontalPodAutoscalerList{} },
+ gentype.PrefersProtobuf[*autoscalingv1.HorizontalPodAutoscaler](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go
index 04d5d0f94..78a2609bf 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v2
import (
- "net/http"
+ http "net/http"
- v2 "k8s.io/api/autoscaling/v2"
- "k8s.io/client-go/kubernetes/scheme"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v2.SchemeGroupVersion
+ gv := autoscalingv2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
index dbce8d102..9eb4a6d93 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2
import (
- "context"
+ context "context"
- v2 "k8s.io/api/autoscaling/v2"
+ autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
+ applyconfigurationsautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type HorizontalPodAutoscalersGetter interface {
// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
type HorizontalPodAutoscalerInterface interface {
- Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error)
- Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
+ Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error)
+ Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error)
+ UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.HorizontalPodAutoscaler, error)
- List(ctx context.Context, opts v1.ListOptions) (*v2.HorizontalPodAutoscalerList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2.HorizontalPodAutoscaler, error)
+ List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2.HorizontalPodAutoscalerList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error)
- Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2.HorizontalPodAutoscaler, err error)
+ Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error)
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error)
HorizontalPodAutoscalerExpansion
}
// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
type horizontalPodAutoscalers struct {
- *gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]
+ *gentype.ClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration]
}
// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers {
return &horizontalPodAutoscalers{
- gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration](
+ gentype.NewClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration](
"horizontalpodautoscalers",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} },
- func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }),
+ func() *autoscalingv2.HorizontalPodAutoscaler { return &autoscalingv2.HorizontalPodAutoscaler{} },
+ func() *autoscalingv2.HorizontalPodAutoscalerList { return &autoscalingv2.HorizontalPodAutoscalerList{} },
+ gentype.PrefersProtobuf[*autoscalingv2.HorizontalPodAutoscaler](),
+ ),
}
}
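Note: the three-type ClientWithListAndApply embedding above is what backs the Apply/ApplyStatus methods in the interface. A hedged sketch of server-side apply against autoscaling/v2 using the renamed apply-configuration package; the object name "web", namespace, and field manager string are placeholders:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
)

// applyHPA declaratively owns the replica bounds of an HPA for a Deployment.
func applyHPA(ctx context.Context, clientset kubernetes.Interface) error {
	hpa := applyautoscalingv2.HorizontalPodAutoscaler("web", "default").
		WithSpec(applyautoscalingv2.HorizontalPodAutoscalerSpec().
			WithScaleTargetRef(applyautoscalingv2.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("web")).
			WithMinReplicas(1).
			WithMaxReplicas(5))

	// Server-side apply: the field manager owns only the fields set above.
	_, err := clientset.AutoscalingV2().HorizontalPodAutoscalers("default").
		Apply(ctx, hpa, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}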
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
index d1dde5ed1..1fcda17c8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v2beta1
import (
- "net/http"
+ http "net/http"
- v2beta1 "k8s.io/api/autoscaling/v2beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v2beta1.SchemeGroupVersion
+ gv := autoscalingv2beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
index 6bc1b7776..c1dc75ccc 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta1
import (
- "context"
+ context "context"
- v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
+ applyconfigurationsautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type HorizontalPodAutoscalersGetter interface {
// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
type HorizontalPodAutoscalerInterface interface {
- Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
- Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+ Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error)
+ Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+ UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error)
- List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error)
+ List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta1.HorizontalPodAutoscalerList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error)
- Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error)
+ Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error)
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error)
HorizontalPodAutoscalerExpansion
}
// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
type horizontalPodAutoscalers struct {
- *gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]
+ *gentype.ClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]
}
// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers {
return &horizontalPodAutoscalers{
- gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration](
+ gentype.NewClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration](
"horizontalpodautoscalers",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} },
- func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }),
+ func() *autoscalingv2beta1.HorizontalPodAutoscaler {
+ return &autoscalingv2beta1.HorizontalPodAutoscaler{}
+ },
+ func() *autoscalingv2beta1.HorizontalPodAutoscalerList {
+ return &autoscalingv2beta1.HorizontalPodAutoscalerList{}
+ },
+ gentype.PrefersProtobuf[*autoscalingv2beta1.HorizontalPodAutoscaler](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
index cae1b4e43..62f5b743c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v2beta2
import (
- "net/http"
+ http "net/http"
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
- "k8s.io/client-go/kubernetes/scheme"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta2Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v2beta2.SchemeGroupVersion
+ gv := autoscalingv2beta2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
index 6f464661a..017b3e1fc 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -19,13 +19,13 @@ limitations under the License.
package v2beta2
import (
- "context"
+ context "context"
- v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
+ applyconfigurationsautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type HorizontalPodAutoscalersGetter interface {
// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
type HorizontalPodAutoscalerInterface interface {
- Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
- Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+ Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error)
+ Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+ UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error)
- List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error)
+ List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta2.HorizontalPodAutoscalerList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error)
- Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error)
+ Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error)
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error)
HorizontalPodAutoscalerExpansion
}
// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
type horizontalPodAutoscalers struct {
- *gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]
+ *gentype.ClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]
}
// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers
func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers {
return &horizontalPodAutoscalers{
- gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration](
+ gentype.NewClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration](
"horizontalpodautoscalers",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} },
- func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }),
+ func() *autoscalingv2beta2.HorizontalPodAutoscaler {
+ return &autoscalingv2beta2.HorizontalPodAutoscaler{}
+ },
+ func() *autoscalingv2beta2.HorizontalPodAutoscalerList {
+ return &autoscalingv2beta2.HorizontalPodAutoscalerList{}
+ },
+ gentype.PrefersProtobuf[*autoscalingv2beta2.HorizontalPodAutoscaler](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
index eee144f71..614d049f3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/batch/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ batchv1 "k8s.io/api/batch/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *BatchV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := batchv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
index 7907a5bf5..29ef3e9b7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- batchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
+ applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type CronJobsGetter interface {
// CronJobInterface has methods to work with CronJob resources.
type CronJobInterface interface {
- Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error)
- Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
+ Create(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.CreateOptions) (*batchv1.CronJob, error)
+ Update(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error)
+ UpdateStatus(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CronJob, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.CronJobList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.CronJob, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*batchv1.CronJobList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error)
- Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.CronJob, err error)
+ Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error)
+ ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error)
CronJobExpansion
}
// cronJobs implements CronJobInterface
type cronJobs struct {
- *gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]
+ *gentype.ClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration]
}
// newCronJobs returns a CronJobs
func newCronJobs(c *BatchV1Client, namespace string) *cronJobs {
return &cronJobs{
- gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration](
+ gentype.NewClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration](
"cronjobs",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.CronJob { return &v1.CronJob{} },
- func() *v1.CronJobList { return &v1.CronJobList{} }),
+ func() *batchv1.CronJob { return &batchv1.CronJob{} },
+ func() *batchv1.CronJobList { return &batchv1.CronJobList{} },
+ gentype.PrefersProtobuf[*batchv1.CronJob](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
index 83dbe6fa4..d77aa0f03 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/batch/v1"
+ batchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- batchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
+ applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type JobsGetter interface {
// JobInterface has methods to work with Job resources.
type JobInterface interface {
- Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error)
- Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
+ Create(ctx context.Context, job *batchv1.Job, opts metav1.CreateOptions) (*batchv1.Job, error)
+ Update(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
+ UpdateStatus(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.Job, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*batchv1.JobList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error)
- Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.Job, err error)
+ Apply(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error)
+ ApplyStatus(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error)
JobExpansion
}
// jobs implements JobInterface
type jobs struct {
- *gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]
+ *gentype.ClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration]
}
// newJobs returns a Jobs
func newJobs(c *BatchV1Client, namespace string) *jobs {
return &jobs{
- gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration](
+ gentype.NewClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration](
"jobs",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Job { return &v1.Job{} },
- func() *v1.JobList { return &v1.JobList{} }),
+ func() *batchv1.Job { return &batchv1.Job{} },
+ func() *batchv1.JobList { return &batchv1.JobList{} },
+ gentype.PrefersProtobuf[*batchv1.Job](),
+ ),
}
}
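Note: beyond Create/Apply, the embedded ClientWithListAndApply also supplies the List/Watch plumbing declared by JobInterface above. A short sketch listing Jobs by label (the selector is illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listExampleJobs prints Jobs in a namespace matching an example label selector.
func listExampleJobs(ctx context.Context, clientset kubernetes.Interface, namespace string) error {
	jobs, err := clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: "app=example", // placeholder selector
	})
	if err != nil {
		return err
	}
	for _, j := range jobs.Items {
		fmt.Printf("%s/%s succeeded=%d\n", j.Namespace, j.Name, j.Status.Succeeded)
	}
	return nil
}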
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
index ebbf063ec..2da9e4135 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/batch/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *BatchV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := batchv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
index a6f7399d8..3091020ba 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/batch/v1beta1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1"
+ applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type CronJobsGetter interface {
// CronJobInterface has methods to work with CronJob resources.
type CronJobInterface interface {
- Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error)
- Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
+ Create(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.CreateOptions) (*batchv1beta1.CronJob, error)
+ Update(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
+ UpdateStatus(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*batchv1beta1.CronJob, error)
+ List(ctx context.Context, opts v1.ListOptions) (*batchv1beta1.CronJobList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error)
- Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1beta1.CronJob, err error)
+ Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error)
+ ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error)
CronJobExpansion
}
// cronJobs implements CronJobInterface
type cronJobs struct {
- *gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]
+ *gentype.ClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration]
}
// newCronJobs returns a CronJobs
func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs {
return &cronJobs{
- gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration](
+ gentype.NewClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration](
"cronjobs",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.CronJob { return &v1beta1.CronJob{} },
- func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }),
+ func() *batchv1beta1.CronJob { return &batchv1beta1.CronJob{} },
+ func() *batchv1beta1.CronJobList { return &batchv1beta1.CronJobList{} },
+ gentype.PrefersProtobuf[*batchv1beta1.CronJob](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
index 6d87c539e..60337cd23 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/certificates/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ certificatesv1 "k8s.io/api/certificates/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := certificatesv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
index 9fa3300e6..6863a22d1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/certificates/v1"
+ certificatesv1 "k8s.io/api/certificates/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1"
+ applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,46 +38,51 @@ type CertificateSigningRequestsGetter interface {
// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources.
type CertificateSigningRequestInterface interface {
- Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error)
- Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
+ Create(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.CreateOptions) (*certificatesv1.CertificateSigningRequest, error)
+ Update(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
+ UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CertificateSigningRequest, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.CertificateSigningRequestList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*certificatesv1.CertificateSigningRequest, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*certificatesv1.CertificateSigningRequestList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error)
- Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *certificatesv1.CertificateSigningRequest, err error)
+ Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error)
- UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error)
+ ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error)
+ UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error)
CertificateSigningRequestExpansion
}
// certificateSigningRequests implements CertificateSigningRequestInterface
type certificateSigningRequests struct {
- *gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]
+ *gentype.ClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration]
}
// newCertificateSigningRequests returns a CertificateSigningRequests
func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests {
return &certificateSigningRequests{
- gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration](
+ gentype.NewClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration](
"certificatesigningrequests",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} },
- func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }),
+ func() *certificatesv1.CertificateSigningRequest { return &certificatesv1.CertificateSigningRequest{} },
+ func() *certificatesv1.CertificateSigningRequestList {
+ return &certificatesv1.CertificateSigningRequestList{}
+ },
+ gentype.PrefersProtobuf[*certificatesv1.CertificateSigningRequest](),
+ ),
}
}
// UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
-func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) {
- result = &v1.CertificateSigningRequest{}
+func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificatesv1.CertificateSigningRequest, err error) {
+ result = &certificatesv1.CertificateSigningRequest{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Resource("certificatesigningrequests").
Name(certificateSigningRequestName).
SubResource("approval").
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
index a9050af94..36e08253a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/certificates/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := certificatesv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
index 74fe9fa14..df215ff53 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/certificates/v1alpha1"
+ certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
+ applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,36 @@ type ClusterTrustBundlesGetter interface {
// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources.
type ClusterTrustBundleInterface interface {
- Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error)
- Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error)
+ Create(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error)
+ Update(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1alpha1.ClusterTrustBundle, error)
+ List(ctx context.Context, opts v1.ListOptions) (*certificatesv1alpha1.ClusterTrustBundleList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error)
- Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1alpha1.ClusterTrustBundle, err error)
+ Apply(ctx context.Context, clusterTrustBundle *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1alpha1.ClusterTrustBundle, err error)
ClusterTrustBundleExpansion
}
// clusterTrustBundles implements ClusterTrustBundleInterface
type clusterTrustBundles struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration]
}
// newClusterTrustBundles returns a ClusterTrustBundles
func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles {
return &clusterTrustBundles{
- gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration](
"clustertrustbundles",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} },
- func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }),
+ func() *certificatesv1alpha1.ClusterTrustBundle { return &certificatesv1alpha1.ClusterTrustBundle{} },
+ func() *certificatesv1alpha1.ClusterTrustBundleList {
+ return &certificatesv1alpha1.ClusterTrustBundleList{}
+ },
+ gentype.PrefersProtobuf[*certificatesv1alpha1.ClusterTrustBundle](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
index fa97b441d..f040e7664 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/certificates/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := certificatesv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
index de9915c5d..4c6e28c65 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/certificates/v1beta1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
+ applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type CertificateSigningRequestsGetter interface {
// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources.
type CertificateSigningRequestInterface interface {
- Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error)
- Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
+ Create(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*certificatesv1beta1.CertificateSigningRequest, error)
+ Update(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
+ UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1beta1.CertificateSigningRequest, error)
+ List(ctx context.Context, opts v1.ListOptions) (*certificatesv1beta1.CertificateSigningRequestList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error)
- Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1beta1.CertificateSigningRequest, err error)
+ Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error)
+ ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error)
CertificateSigningRequestExpansion
}
// certificateSigningRequests implements CertificateSigningRequestInterface
type certificateSigningRequests struct {
- *gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]
+ *gentype.ClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration]
}
// newCertificateSigningRequests returns a CertificateSigningRequests
func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests {
return &certificateSigningRequests{
- gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration](
+ gentype.NewClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration](
"certificatesigningrequests",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} },
- func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }),
+ func() *certificatesv1beta1.CertificateSigningRequest {
+ return &certificatesv1beta1.CertificateSigningRequest{}
+ },
+ func() *certificatesv1beta1.CertificateSigningRequestList {
+ return &certificatesv1beta1.CertificateSigningRequestList{}
+ },
+ gentype.PrefersProtobuf[*certificatesv1beta1.CertificateSigningRequest](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
index e19469d53..427cb7e93 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/coordination/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ coordinationv1 "k8s.io/api/coordination/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := coordinationv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
index 97834d6ac..6e7784d6a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/coordination/v1"
+ coordinationv1 "k8s.io/api/coordination/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1"
+ applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type LeasesGetter interface {
// LeaseInterface has methods to work with Lease resources.
type LeaseInterface interface {
- Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error)
- Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error)
+ Create(ctx context.Context, lease *coordinationv1.Lease, opts metav1.CreateOptions) (*coordinationv1.Lease, error)
+ Update(ctx context.Context, lease *coordinationv1.Lease, opts metav1.UpdateOptions) (*coordinationv1.Lease, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*coordinationv1.Lease, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*coordinationv1.LeaseList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error)
- Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error)
+ Apply(ctx context.Context, lease *applyconfigurationscoordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *coordinationv1.Lease, err error)
LeaseExpansion
}
// leases implements LeaseInterface
type leases struct {
- *gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]
+ *gentype.ClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration]
}
// newLeases returns a Leases
func newLeases(c *CoordinationV1Client, namespace string) *leases {
return &leases{
- gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration](
+ gentype.NewClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration](
"leases",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Lease { return &v1.Lease{} },
- func() *v1.LeaseList { return &v1.LeaseList{} }),
+ func() *coordinationv1.Lease { return &coordinationv1.Lease{} },
+ func() *coordinationv1.LeaseList { return &coordinationv1.LeaseList{} },
+ gentype.PrefersProtobuf[*coordinationv1.Lease](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go
similarity index 65%
rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go
rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go
index dd75e5d01..4c286d463 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go
@@ -16,34 +16,34 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/coordination/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
-type CoordinationV1alpha1Interface interface {
+type CoordinationV1alpha2Interface interface {
RESTClient() rest.Interface
LeaseCandidatesGetter
}
-// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group.
-type CoordinationV1alpha1Client struct {
+// CoordinationV1alpha2Client is used to interact with features provided by the coordination.k8s.io group.
+type CoordinationV1alpha2Client struct {
restClient rest.Interface
}
-func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface {
+func (c *CoordinationV1alpha2Client) LeaseCandidates(namespace string) LeaseCandidateInterface {
return newLeaseCandidates(c, namespace)
}
-// NewForConfig creates a new CoordinationV1alpha1Client for the given config.
+// NewForConfig creates a new CoordinationV1alpha2Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) {
+func NewForConfig(c *rest.Config) (*CoordinationV1alpha2Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@@ -55,9 +55,9 @@ func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) {
return NewForConfigAndClient(&config, httpClient)
}
-// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client.
+// NewForConfigAndClient creates a new CoordinationV1alpha2Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) {
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha2Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
@@ -66,12 +66,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha
if err != nil {
return nil, err
}
- return &CoordinationV1alpha1Client{client}, nil
+ return &CoordinationV1alpha2Client{client}, nil
}
-// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and
+// NewForConfigOrDie creates a new CoordinationV1alpha2Client for the given config and
// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client {
+func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha2Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@@ -79,16 +79,16 @@ func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client {
return client
}
-// New creates a new CoordinationV1alpha1Client for the given RESTClient.
-func New(c rest.Interface) *CoordinationV1alpha1Client {
- return &CoordinationV1alpha1Client{c}
+// New creates a new CoordinationV1alpha2Client for the given RESTClient.
+func New(c rest.Interface) *CoordinationV1alpha2Client {
+ return &CoordinationV1alpha2Client{c}
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := coordinationv1alpha2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
@@ -99,7 +99,7 @@ func setConfigDefaults(config *rest.Config) error {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
-func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface {
+func (c *CoordinationV1alpha2Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go
similarity index 97%
rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go
rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go
index df51baa4d..baaf2d985 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go
@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
-package v1alpha1
+package v1alpha2
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go
similarity index 97%
rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go
rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go
index 2dc2f30cf..52af4786c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go
@@ -16,6 +16,6 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
type LeaseCandidateExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go
similarity index 52%
rename from vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go
rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go
index 868185135..c994a8893 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go
@@ -16,16 +16,16 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1alpha2
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/coordination/v1alpha1"
+ coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1"
+ applyconfigurationscoordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type LeaseCandidatesGetter interface {
// LeaseCandidateInterface has methods to work with LeaseCandidate resources.
type LeaseCandidateInterface interface {
- Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error)
- Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error)
+ Create(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.CreateOptions) (*coordinationv1alpha2.LeaseCandidate, error)
+ Update(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.UpdateOptions) (*coordinationv1alpha2.LeaseCandidate, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1alpha2.LeaseCandidate, error)
+ List(ctx context.Context, opts v1.ListOptions) (*coordinationv1alpha2.LeaseCandidateList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error)
- Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1alpha2.LeaseCandidate, err error)
+ Apply(ctx context.Context, leaseCandidate *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1alpha2.LeaseCandidate, err error)
LeaseCandidateExpansion
}
// leaseCandidates implements LeaseCandidateInterface
type leaseCandidates struct {
- *gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]
+ *gentype.ClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration]
}
// newLeaseCandidates returns a LeaseCandidates
-func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates {
+func newLeaseCandidates(c *CoordinationV1alpha2Client, namespace string) *leaseCandidates {
return &leaseCandidates{
- gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration](
+ gentype.NewClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration](
"leasecandidates",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} },
- func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }),
+ func() *coordinationv1alpha2.LeaseCandidate { return &coordinationv1alpha2.LeaseCandidate{} },
+ func() *coordinationv1alpha2.LeaseCandidateList { return &coordinationv1alpha2.LeaseCandidateList{} },
+ gentype.PrefersProtobuf[*coordinationv1alpha2.LeaseCandidate](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
index 27d674e23..1f1afba24 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/coordination/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := coordinationv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
index 62341e53b..18ca9823c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/coordination/v1beta1"
+ coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1"
+ applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type LeasesGetter interface {
// LeaseInterface has methods to work with Lease resources.
type LeaseInterface interface {
- Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error)
- Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error)
+ Create(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.CreateOptions) (*coordinationv1beta1.Lease, error)
+ Update(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.UpdateOptions) (*coordinationv1beta1.Lease, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1beta1.Lease, error)
+ List(ctx context.Context, opts v1.ListOptions) (*coordinationv1beta1.LeaseList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error)
- Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1beta1.Lease, err error)
+ Apply(ctx context.Context, lease *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1beta1.Lease, err error)
LeaseExpansion
}
// leases implements LeaseInterface
type leases struct {
- *gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]
+ *gentype.ClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration]
}
// newLeases returns a Leases
func newLeases(c *CoordinationV1beta1Client, namespace string) *leases {
return &leases{
- gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration](
+ gentype.NewClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration](
"leases",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Lease { return &v1beta1.Lease{} },
- func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }),
+ func() *coordinationv1beta1.Lease { return &coordinationv1beta1.Lease{} },
+ func() *coordinationv1beta1.LeaseList { return &coordinationv1beta1.LeaseList{} },
+ gentype.PrefersProtobuf[*coordinationv1beta1.Lease](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
index ab9458a5c..b8e58cd15 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ComponentStatusesGetter interface {
// ComponentStatusInterface has methods to work with ComponentStatus resources.
type ComponentStatusInterface interface {
- Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error)
- Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error)
+ Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.CreateOptions) (*corev1.ComponentStatus, error)
+ Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.UpdateOptions) (*corev1.ComponentStatus, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ComponentStatus, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ComponentStatusList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error)
- Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error)
+ Apply(ctx context.Context, componentStatus *applyconfigurationscorev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ComponentStatus, err error)
ComponentStatusExpansion
}
// componentStatuses implements ComponentStatusInterface
type componentStatuses struct {
- *gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, *applyconfigurationscorev1.ComponentStatusApplyConfiguration]
}
// newComponentStatuses returns a ComponentStatuses
func newComponentStatuses(c *CoreV1Client) *componentStatuses {
return &componentStatuses{
- gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, *applyconfigurationscorev1.ComponentStatusApplyConfiguration](
"componentstatuses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ComponentStatus { return &v1.ComponentStatus{} },
- func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }),
+ func() *corev1.ComponentStatus { return &corev1.ComponentStatus{} },
+ func() *corev1.ComponentStatusList { return &corev1.ComponentStatusList{} },
+ gentype.PrefersProtobuf[*corev1.ComponentStatus](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
index 72aa2361f..74d321193 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ConfigMapsGetter interface {
// ConfigMapInterface has methods to work with ConfigMap resources.
type ConfigMapInterface interface {
- Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error)
- Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error)
+ Create(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.CreateOptions) (*corev1.ConfigMap, error)
+ Update(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.UpdateOptions) (*corev1.ConfigMap, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ConfigMap, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ConfigMapList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error)
- Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error)
+ Apply(ctx context.Context, configMap *applyconfigurationscorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ConfigMap, err error)
ConfigMapExpansion
}
// configMaps implements ConfigMapInterface
type configMaps struct {
- *gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration]
}
// newConfigMaps returns a ConfigMaps
func newConfigMaps(c *CoreV1Client, namespace string) *configMaps {
return &configMaps{
- gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration](
"configmaps",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ConfigMap { return &v1.ConfigMap{} },
- func() *v1.ConfigMapList { return &v1.ConfigMapList{} }),
+ func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
+ func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} },
+ gentype.PrefersProtobuf[*corev1.ConfigMap](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
index 6e59e4cc6..abf85cba6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/core/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ corev1 "k8s.io/api/core/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -160,10 +160,10 @@ func New(c rest.Interface) *CoreV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := corev1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/api"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
index 9b9fc5fc1..b96a8b385 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EndpointsGetter interface {
// EndpointsInterface has methods to work with Endpoints resources.
type EndpointsInterface interface {
- Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error)
- Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error)
+ Create(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.CreateOptions) (*corev1.Endpoints, error)
+ Update(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.UpdateOptions) (*corev1.Endpoints, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Endpoints, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.EndpointsList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error)
- Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error)
+ Apply(ctx context.Context, endpoints *applyconfigurationscorev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Endpoints, err error)
EndpointsExpansion
}
// endpoints implements EndpointsInterface
type endpoints struct {
- *gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration]
}
// newEndpoints returns a Endpoints
func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
return &endpoints{
- gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration](
"endpoints",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Endpoints { return &v1.Endpoints{} },
- func() *v1.EndpointsList { return &v1.EndpointsList{} }),
+ func() *corev1.Endpoints { return &corev1.Endpoints{} },
+ func() *corev1.EndpointsList { return &corev1.EndpointsList{} },
+ gentype.PrefersProtobuf[*corev1.Endpoints](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
index 5ff0f0690..dd0cc80b8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EventsGetter interface {
// EventInterface has methods to work with Event resources.
type EventInterface interface {
- Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error)
- Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error)
+ Create(ctx context.Context, event *corev1.Event, opts metav1.CreateOptions) (*corev1.Event, error)
+ Update(ctx context.Context, event *corev1.Event, opts metav1.UpdateOptions) (*corev1.Event, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Event, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.EventList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error)
- Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Event, err error)
+ Apply(ctx context.Context, event *applyconfigurationscorev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Event, err error)
EventExpansion
}
// events implements EventInterface
type events struct {
- *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration]
}
// newEvents returns a Events
func newEvents(c *CoreV1Client, namespace string) *events {
return &events{
- gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration](
"events",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Event { return &v1.Event{} },
- func() *v1.EventList { return &v1.EventList{} }),
+ func() *corev1.Event { return &corev1.Event{} },
+ func() *corev1.EventList { return &corev1.EventList{} },
+ gentype.PrefersProtobuf[*corev1.Event](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
index f8e4048f9..51fa11d1b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type LimitRangesGetter interface {
// LimitRangeInterface has methods to work with LimitRange resources.
type LimitRangeInterface interface {
- Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error)
- Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error)
+ Create(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.CreateOptions) (*corev1.LimitRange, error)
+ Update(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.UpdateOptions) (*corev1.LimitRange, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.LimitRange, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.LimitRangeList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error)
- Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error)
+ Apply(ctx context.Context, limitRange *applyconfigurationscorev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.LimitRange, err error)
LimitRangeExpansion
}
// limitRanges implements LimitRangeInterface
type limitRanges struct {
- *gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration]
}
// newLimitRanges returns a LimitRanges
func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges {
return &limitRanges{
- gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration](
"limitranges",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.LimitRange { return &v1.LimitRange{} },
- func() *v1.LimitRangeList { return &v1.LimitRangeList{} }),
+ func() *corev1.LimitRange { return &corev1.LimitRange{} },
+ func() *corev1.LimitRangeList { return &corev1.LimitRangeList{} },
+ gentype.PrefersProtobuf[*corev1.LimitRange](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
index 75d20648f..323934938 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,35 +38,37 @@ type NamespacesGetter interface {
// NamespaceInterface has methods to work with Namespace resources.
type NamespaceInterface interface {
- Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
- Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
+ Create(ctx context.Context, namespace *corev1.Namespace, opts metav1.CreateOptions) (*corev1.Namespace, error)
+ Update(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
+ UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Namespace, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.NamespaceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error)
- Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error)
+ Apply(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error)
+ ApplyStatus(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error)
NamespaceExpansion
}
// namespaces implements NamespaceInterface
type namespaces struct {
- *gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration]
}
// newNamespaces returns a Namespaces
func newNamespaces(c *CoreV1Client) *namespaces {
return &namespaces{
- gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration](
"namespaces",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.Namespace { return &v1.Namespace{} },
- func() *v1.NamespaceList { return &v1.NamespaceList{} }),
+ func() *corev1.Namespace { return &corev1.Namespace{} },
+ func() *corev1.NamespaceList { return &corev1.NamespaceList{} },
+ gentype.PrefersProtobuf[*corev1.Namespace](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
index df1a7817f..1851b025f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type NodesGetter interface {
// NodeInterface has methods to work with Node resources.
type NodeInterface interface {
- Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error)
- Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ Create(ctx context.Context, node *corev1.Node, opts metav1.CreateOptions) (*corev1.Node, error)
+ Update(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ UpdateStatus(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Node, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.NodeList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error)
- Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Node, err error)
+ Apply(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
+ ApplyStatus(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error)
NodeExpansion
}
// nodes implements NodeInterface
type nodes struct {
- *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration]
}
// newNodes returns a Nodes
func newNodes(c *CoreV1Client) *nodes {
return &nodes{
- gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration](
"nodes",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.Node { return &v1.Node{} },
- func() *v1.NodeList { return &v1.NodeList{} }),
+ func() *corev1.Node { return &corev1.Node{} },
+ func() *corev1.NodeList { return &corev1.NodeList{} },
+ gentype.PrefersProtobuf[*corev1.Node](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
index 8be40f866..077a1ba4f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type PersistentVolumesGetter interface {
// PersistentVolumeInterface has methods to work with PersistentVolume resources.
type PersistentVolumeInterface interface {
- Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error)
- Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
+ Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.CreateOptions) (*corev1.PersistentVolume, error)
+ Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
+ UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolume, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error)
- Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error)
+ Apply(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error)
+ ApplyStatus(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error)
PersistentVolumeExpansion
}
// persistentVolumes implements PersistentVolumeInterface
type persistentVolumes struct {
- *gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration]
}
// newPersistentVolumes returns a PersistentVolumes
func newPersistentVolumes(c *CoreV1Client) *persistentVolumes {
return &persistentVolumes{
- gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration](
"persistentvolumes",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.PersistentVolume { return &v1.PersistentVolume{} },
- func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }),
+ func() *corev1.PersistentVolume { return &corev1.PersistentVolume{} },
+ func() *corev1.PersistentVolumeList { return &corev1.PersistentVolumeList{} },
+ gentype.PrefersProtobuf[*corev1.PersistentVolume](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
index 7721b0092..cbe75e812 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type PersistentVolumeClaimsGetter interface {
// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources.
type PersistentVolumeClaimInterface interface {
- Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error)
- Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
+ Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.CreateOptions) (*corev1.PersistentVolumeClaim, error)
+ Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
+ UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolumeClaim, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeClaimList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error)
- Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error)
+ Apply(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error)
+ ApplyStatus(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error)
PersistentVolumeClaimExpansion
}
// persistentVolumeClaims implements PersistentVolumeClaimInterface
type persistentVolumeClaims struct {
- *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration]
}
// newPersistentVolumeClaims returns a PersistentVolumeClaims
func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims {
return &persistentVolumeClaims{
- gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration](
"persistentvolumeclaims",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} },
- func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }),
+ func() *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{} },
+ func() *corev1.PersistentVolumeClaimList { return &corev1.PersistentVolumeClaimList{} },
+ gentype.PrefersProtobuf[*corev1.PersistentVolumeClaim](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
index 470b7de7b..072a55941 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,46 +38,50 @@ type PodsGetter interface {
// PodInterface has methods to work with Pod resources.
type PodInterface interface {
- Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error)
- Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+ Create(ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error)
+ Update(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+ UpdateStatus(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
- Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Pod, err error)
+ Apply(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
- UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+ ApplyStatus(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error)
+ UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
+ UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error)
PodExpansion
}
// pods implements PodInterface
type pods struct {
- *gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration]
}
// newPods returns a Pods
func newPods(c *CoreV1Client, namespace string) *pods {
return &pods{
- gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration](
"pods",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Pod { return &v1.Pod{} },
- func() *v1.PodList { return &v1.PodList{} }),
+ func() *corev1.Pod { return &corev1.Pod{} },
+ func() *corev1.PodList { return &corev1.PodList{} },
+ gentype.PrefersProtobuf[*corev1.Pod](),
+ ),
}
}
// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
-func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
- result = &v1.Pod{}
+func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) {
+ result = &corev1.Pod{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("pods").
Name(podName).
@@ -88,3 +92,19 @@ func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, po
Into(result)
return
}
+
+// UpdateResize takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) {
+ result = &corev1.Pod{}
+ err = c.GetClient().Put().
+ UseProtobufAsDefault().
+ Namespace(c.GetNamespace()).
+ Resource("pods").
+ Name(podName).
+ SubResource("resize").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pod).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
index 060a05909..b0cfa1bc1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type PodTemplatesGetter interface {
// PodTemplateInterface has methods to work with PodTemplate resources.
type PodTemplateInterface interface {
- Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error)
- Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error)
+ Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.CreateOptions) (*corev1.PodTemplate, error)
+ Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.UpdateOptions) (*corev1.PodTemplate, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PodTemplate, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodTemplateList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error)
- Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error)
+ Apply(ctx context.Context, podTemplate *applyconfigurationscorev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PodTemplate, err error)
PodTemplateExpansion
}
// podTemplates implements PodTemplateInterface
type podTemplates struct {
- *gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration]
}
// newPodTemplates returns a PodTemplates
func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates {
return &podTemplates{
- gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration](
"podtemplates",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.PodTemplate { return &v1.PodTemplate{} },
- func() *v1.PodTemplateList { return &v1.PodTemplateList{} }),
+ func() *corev1.PodTemplate { return &corev1.PodTemplate{} },
+ func() *corev1.PodTemplateList { return &corev1.PodTemplateList{} },
+ gentype.PrefersProtobuf[*corev1.PodTemplate](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
index 9b275ed1b..f8a7c9285 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
autoscalingv1 "k8s.io/api/autoscaling/v1"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -39,19 +39,19 @@ type ReplicationControllersGetter interface {
// ReplicationControllerInterface has methods to work with ReplicationController resources.
type ReplicationControllerInterface interface {
- Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error)
- Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
+ Create(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.CreateOptions) (*corev1.ReplicationController, error)
+ Update(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
+ UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ReplicationController, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ReplicationControllerList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error)
- Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error)
+ Apply(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error)
+ ApplyStatus(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error)
GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
@@ -60,19 +60,21 @@ type ReplicationControllerInterface interface {
// replicationControllers implements ReplicationControllerInterface
type replicationControllers struct {
- *gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration]
}
// newReplicationControllers returns a ReplicationControllers
func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers {
return &replicationControllers{
- gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration](
"replicationcontrollers",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ReplicationController { return &v1.ReplicationController{} },
- func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }),
+ func() *corev1.ReplicationController { return &corev1.ReplicationController{} },
+ func() *corev1.ReplicationControllerList { return &corev1.ReplicationControllerList{} },
+ gentype.PrefersProtobuf[*corev1.ReplicationController](),
+ ),
}
}
@@ -80,6 +82,7 @@ func newReplicationControllers(c *CoreV1Client, namespace string) *replicationCo
func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicationcontrollers").
Name(replicationControllerName).
@@ -94,6 +97,7 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro
func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
result = &autoscalingv1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicationcontrollers").
Name(replicationControllerName).
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
index 4b2dcd3b5..a0435accc 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type ResourceQuotasGetter interface {
// ResourceQuotaInterface has methods to work with ResourceQuota resources.
type ResourceQuotaInterface interface {
- Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error)
- Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
+ Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.CreateOptions) (*corev1.ResourceQuota, error)
+ Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
+ UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ResourceQuota, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ResourceQuotaList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error)
- Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error)
+ Apply(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error)
+ ApplyStatus(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error)
ResourceQuotaExpansion
}
// resourceQuotas implements ResourceQuotaInterface
type resourceQuotas struct {
- *gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration]
}
// newResourceQuotas returns a ResourceQuotas
func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas {
return &resourceQuotas{
- gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration](
"resourcequotas",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ResourceQuota { return &v1.ResourceQuota{} },
- func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }),
+ func() *corev1.ResourceQuota { return &corev1.ResourceQuota{} },
+ func() *corev1.ResourceQuotaList { return &corev1.ResourceQuotaList{} },
+ gentype.PrefersProtobuf[*corev1.ResourceQuota](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
index 12a8d1178..a7ab56a27 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type SecretsGetter interface {
// SecretInterface has methods to work with Secret resources.
type SecretInterface interface {
- Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error)
- Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error)
+ Create(ctx context.Context, secret *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error)
+ Update(ctx context.Context, secret *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Secret, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.SecretList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error)
- Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Secret, err error)
+ Apply(ctx context.Context, secret *applyconfigurationscorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Secret, err error)
SecretExpansion
}
// secrets implements SecretInterface
type secrets struct {
- *gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration]
}
// newSecrets returns a Secrets
func newSecrets(c *CoreV1Client, namespace string) *secrets {
return &secrets{
- gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration](
"secrets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Secret { return &v1.Secret{} },
- func() *v1.SecretList { return &v1.SecretList{} }),
+ func() *corev1.Secret { return &corev1.Secret{} },
+ func() *corev1.SecretList { return &corev1.SecretList{} },
+ gentype.PrefersProtobuf[*corev1.Secret](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
index ec935a324..f145a137c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,35 +38,37 @@ type ServicesGetter interface {
// ServiceInterface has methods to work with Service resources.
type ServiceInterface interface {
- Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error)
- Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+ Create(ctx context.Context, service *corev1.Service, opts metav1.CreateOptions) (*corev1.Service, error)
+ Update(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+ UpdateStatus(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Service, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error)
- Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Service, err error)
+ Apply(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error)
+ ApplyStatus(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error)
ServiceExpansion
}
// services implements ServiceInterface
type services struct {
- *gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration]
}
// newServices returns a Services
func newServices(c *CoreV1Client, namespace string) *services {
return &services{
- gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration](
"services",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Service { return &v1.Service{} },
- func() *v1.ServiceList { return &v1.ServiceList{} }),
+ func() *corev1.Service { return &corev1.Service{} },
+ func() *corev1.ServiceList { return &corev1.ServiceList{} },
+ gentype.PrefersProtobuf[*corev1.Service](),
+ ),
}
}
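
For orientation: the regenerated Service client keeps the exact exported surface declared in `ServiceInterface` above; only the internal import aliases change and a `gentype.PrefersProtobuf` option is added, so existing callers compile unchanged. The same pattern repeats for every typed client in this diff. A minimal caller-side sketch, assuming an existing `*rest.Config`; the "example" Service and "example-manager" field manager are placeholders:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// demoServices exercises the regenerated Service client: Get still returns the
// k8s.io/api/core/v1 type, and Apply still takes the applyconfigurations type,
// exactly as declared in ServiceInterface above.
func demoServices(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// Plain read: returns *corev1.Service from k8s.io/api/core/v1.
	svc, err := cs.CoreV1().Services("default").Get(ctx, "kubernetes", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("clusterIP:", svc.Spec.ClusterIP)

	// Server-side apply via ServiceApplyConfiguration from
	// k8s.io/client-go/applyconfigurations/core/v1 (the package aliased
	// applyconfigurationscorev1 inside the generated file).
	ac := corev1apply.Service("example", "default").
		WithLabels(map[string]string{"app": "example"}).
		WithSpec(corev1apply.ServiceSpec().
			WithSelector(map[string]string{"app": "example"}).
			WithPorts(corev1apply.ServicePort().WithPort(80)))
	_, err = cs.CoreV1().Services("default").Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-manager", // placeholder manager name
	})
	return err
}
```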
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
index eb995d454..8458b6d9b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
@@ -19,14 +19,14 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
authenticationv1 "k8s.io/api/authentication/v1"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- corev1 "k8s.io/client-go/applyconfigurations/core/v1"
+ applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -39,15 +39,15 @@ type ServiceAccountsGetter interface {
// ServiceAccountInterface has methods to work with ServiceAccount resources.
type ServiceAccountInterface interface {
- Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error)
- Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error)
+ Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.CreateOptions) (*corev1.ServiceAccount, error)
+ Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.UpdateOptions) (*corev1.ServiceAccount, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ServiceAccount, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceAccountList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error)
- Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error)
+ Apply(ctx context.Context, serviceAccount *applyconfigurationscorev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ServiceAccount, err error)
CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error)
ServiceAccountExpansion
@@ -55,19 +55,21 @@ type ServiceAccountInterface interface {
// serviceAccounts implements ServiceAccountInterface
type serviceAccounts struct {
- *gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]
+ *gentype.ClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration]
}
// newServiceAccounts returns a ServiceAccounts
func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts {
return &serviceAccounts{
- gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration](
+ gentype.NewClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration](
"serviceaccounts",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.ServiceAccount { return &v1.ServiceAccount{} },
- func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }),
+ func() *corev1.ServiceAccount { return &corev1.ServiceAccount{} },
+ func() *corev1.ServiceAccountList { return &corev1.ServiceAccountList{} },
+ gentype.PrefersProtobuf[*corev1.ServiceAccount](),
+ ),
}
}
@@ -75,6 +77,7 @@ func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts {
func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) {
result = &authenticationv1.TokenRequest{}
err = c.GetClient().Post().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("serviceaccounts").
Name(serviceAccountName).
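
The `CreateToken` expansion above now also opts in to protobuf via `UseProtobufAsDefault()`; the call itself is unchanged for users. A minimal sketch, assuming an existing clientset; the audience string is only an example:

```go
package main

import (
	"context"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
)

// requestToken asks the API server to mint a short-lived token for the
// "default" ServiceAccount via the CreateToken expansion shown above.
func requestToken(ctx context.Context, cs kubernetes.Interface) (string, error) {
	tr := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"https://kubernetes.default.svc"}, // example audience
			ExpirationSeconds: ptr.To[int64](3600),                        // one hour
		},
	}
	tok, err := cs.CoreV1().ServiceAccounts("default").CreateToken(ctx, "default", tr, metav1.CreateOptions{})
	if err != nil {
		return "", err
	}
	return tok.Status.Token, nil
}
```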
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
index 9041443b3..fbc685df8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/discovery/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ discoveryv1 "k8s.io/api/discovery/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := discoveryv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
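
`setConfigDefaults` above now routes the scheme codecs through `rest.CodecFactoryForGeneratedClient`, which, together with the `gentype.PrefersProtobuf` options passed by the resource clients in this diff, allows built-in types to be requested as protobuf. Callers still construct the group client as before; a minimal sketch, assuming an existing `*rest.Config`:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	discoveryclient "k8s.io/client-go/kubernetes/typed/discovery/v1"
	"k8s.io/client-go/rest"
)

// listSlices builds the standalone DiscoveryV1 group client (NewForConfig runs
// the setConfigDefaults shown above) and lists a few EndpointSlices.
func listSlices(ctx context.Context, cfg *rest.Config) error {
	c, err := discoveryclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	slices, err := c.EndpointSlices("default").List(ctx, metav1.ListOptions{Limit: 5})
	if err != nil {
		return err
	}
	for _, s := range slices.Items {
		fmt.Printf("%s: %d endpoints\n", s.Name, len(s.Endpoints))
	}
	return nil
}
```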
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
index 1f927055c..75b9a559e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/discovery/v1"
+ discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1"
+ applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EndpointSlicesGetter interface {
// EndpointSliceInterface has methods to work with EndpointSlice resources.
type EndpointSliceInterface interface {
- Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (*v1.EndpointSlice, error)
- Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (*v1.EndpointSlice, error)
+ Create(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.CreateOptions) (*discoveryv1.EndpointSlice, error)
+ Update(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.UpdateOptions) (*discoveryv1.EndpointSlice, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EndpointSlice, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointSliceList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*discoveryv1.EndpointSlice, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*discoveryv1.EndpointSliceList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error)
- Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *discoveryv1.EndpointSlice, err error)
+ Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *discoveryv1.EndpointSlice, err error)
EndpointSliceExpansion
}
// endpointSlices implements EndpointSliceInterface
type endpointSlices struct {
- *gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]
+ *gentype.ClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration]
}
// newEndpointSlices returns a EndpointSlices
func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices {
return &endpointSlices{
- gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration](
+ gentype.NewClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration](
"endpointslices",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.EndpointSlice { return &v1.EndpointSlice{} },
- func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }),
+ func() *discoveryv1.EndpointSlice { return &discoveryv1.EndpointSlice{} },
+ func() *discoveryv1.EndpointSliceList { return &discoveryv1.EndpointSliceList{} },
+ gentype.PrefersProtobuf[*discoveryv1.EndpointSlice](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go
index 193d5e9eb..908446c6d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/discovery/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := discoveryv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
index 298cfbc87..4ef2752e7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/discovery/v1beta1"
+ discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1"
+ applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EndpointSlicesGetter interface {
// EndpointSliceInterface has methods to work with EndpointSlice resources.
type EndpointSliceInterface interface {
- Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error)
- Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error)
+ Create(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.CreateOptions) (*discoveryv1beta1.EndpointSlice, error)
+ Update(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.UpdateOptions) (*discoveryv1beta1.EndpointSlice, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*discoveryv1beta1.EndpointSlice, error)
+ List(ctx context.Context, opts v1.ListOptions) (*discoveryv1beta1.EndpointSliceList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error)
- Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *discoveryv1beta1.EndpointSlice, err error)
+ Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *discoveryv1beta1.EndpointSlice, err error)
EndpointSliceExpansion
}
// endpointSlices implements EndpointSliceInterface
type endpointSlices struct {
- *gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]
+ *gentype.ClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration]
}
// newEndpointSlices returns a EndpointSlices
func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices {
return &endpointSlices{
- gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration](
+ gentype.NewClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration](
"endpointslices",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} },
- func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }),
+ func() *discoveryv1beta1.EndpointSlice { return &discoveryv1beta1.EndpointSlice{} },
+ func() *discoveryv1beta1.EndpointSliceList { return &discoveryv1beta1.EndpointSliceList{} },
+ gentype.PrefersProtobuf[*discoveryv1beta1.EndpointSlice](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
index d021a76c4..fd3358476 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/events/v1"
+ eventsv1 "k8s.io/api/events/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- eventsv1 "k8s.io/client-go/applyconfigurations/events/v1"
+ applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EventsGetter interface {
// EventInterface has methods to work with Event resources.
type EventInterface interface {
- Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error)
- Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error)
+ Create(ctx context.Context, event *eventsv1.Event, opts metav1.CreateOptions) (*eventsv1.Event, error)
+ Update(ctx context.Context, event *eventsv1.Event, opts metav1.UpdateOptions) (*eventsv1.Event, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*eventsv1.Event, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*eventsv1.EventList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error)
- Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *eventsv1.Event, err error)
+ Apply(ctx context.Context, event *applyconfigurationseventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *eventsv1.Event, err error)
EventExpansion
}
// events implements EventInterface
type events struct {
- *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]
+ *gentype.ClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration]
}
// newEvents returns a Events
func newEvents(c *EventsV1Client, namespace string) *events {
return &events{
- gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration](
+ gentype.NewClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration](
"events",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Event { return &v1.Event{} },
- func() *v1.EventList { return &v1.EventList{} }),
+ func() *eventsv1.Event { return &eventsv1.Event{} },
+ func() *eventsv1.EventList { return &eventsv1.EventList{} },
+ gentype.PrefersProtobuf[*eventsv1.Event](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go
index 8c73918d1..959ff5f81 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/events/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ eventsv1 "k8s.io/api/events/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := eventsv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
index 77ca2e775..c18a1aeb6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/events/v1beta1"
+ eventsv1beta1 "k8s.io/api/events/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1"
+ applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type EventsGetter interface {
// EventInterface has methods to work with Event resources.
type EventInterface interface {
- Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error)
- Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error)
+ Create(ctx context.Context, event *eventsv1beta1.Event, opts v1.CreateOptions) (*eventsv1beta1.Event, error)
+ Update(ctx context.Context, event *eventsv1beta1.Event, opts v1.UpdateOptions) (*eventsv1beta1.Event, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*eventsv1beta1.Event, error)
+ List(ctx context.Context, opts v1.ListOptions) (*eventsv1beta1.EventList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error)
- Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *eventsv1beta1.Event, err error)
+ Apply(ctx context.Context, event *applyconfigurationseventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *eventsv1beta1.Event, err error)
EventExpansion
}
// events implements EventInterface
type events struct {
- *gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]
+ *gentype.ClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration]
}
// newEvents returns a Events
func newEvents(c *EventsV1beta1Client, namespace string) *events {
return &events{
- gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration](
+ gentype.NewClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration](
"events",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Event { return &v1beta1.Event{} },
- func() *v1beta1.EventList { return &v1beta1.EventList{} }),
+ func() *eventsv1beta1.Event { return &eventsv1beta1.Event{} },
+ func() *eventsv1beta1.EventList { return &eventsv1beta1.EventList{} },
+ gentype.PrefersProtobuf[*eventsv1beta1.Event](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
index 66506bf88..0bfc3cb60 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/events/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ eventsv1beta1 "k8s.io/api/events/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := eventsv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
index f86194bf0..c04be73a3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
+ applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type DaemonSetsGetter interface {
// DaemonSetInterface has methods to work with DaemonSet resources.
type DaemonSetInterface interface {
- Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error)
- Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
+ Create(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.CreateOptions) (*extensionsv1beta1.DaemonSet, error)
+ Update(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
+ UpdateStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.DaemonSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DaemonSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error)
- Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.DaemonSet, err error)
+ Apply(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error)
+ ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error)
DaemonSetExpansion
}
// daemonSets implements DaemonSetInterface
type daemonSets struct {
- *gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration]
}
// newDaemonSets returns a DaemonSets
func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets {
return &daemonSets{
- gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration](
"daemonsets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} },
- func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }),
+ func() *extensionsv1beta1.DaemonSet { return &extensionsv1beta1.DaemonSet{} },
+ func() *extensionsv1beta1.DaemonSetList { return &extensionsv1beta1.DaemonSetList{} },
+ gentype.PrefersProtobuf[*extensionsv1beta1.DaemonSet](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
index 021fbb3b3..1bcf3cbc8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1beta1
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
+ applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// DeploymentsGetter has a method to return a DeploymentInterface.
@@ -40,48 +40,51 @@ type DeploymentsGetter interface {
// DeploymentInterface has methods to work with Deployment resources.
type DeploymentInterface interface {
- Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
- Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+ Create(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.CreateOptions) (*extensionsv1beta1.Deployment, error)
+ Update(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+ UpdateStatus(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Deployment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DeploymentList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
- Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Deployment, err error)
+ Apply(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
- GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
- UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
- ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error)
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error)
+ GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error)
+ UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error)
+ ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error)
DeploymentExpansion
}
// deployments implements DeploymentInterface
type deployments struct {
- *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration]
}
// newDeployments returns a Deployments
func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments {
return &deployments{
- gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration](
"deployments",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Deployment { return &v1beta1.Deployment{} },
- func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }),
+ func() *extensionsv1beta1.Deployment { return &extensionsv1beta1.Deployment{} },
+ func() *extensionsv1beta1.DeploymentList { return &extensionsv1beta1.DeploymentList{} },
+ gentype.PrefersProtobuf[*extensionsv1beta1.Deployment](),
+ ),
}
}
-// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any.
-func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
- result = &v1beta1.Scale{}
+// GetScale takes name of the deployment, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any.
+func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) {
+ result = &extensionsv1beta1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
@@ -93,9 +96,10 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio
}
// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
- result = &v1beta1.Scale{}
+func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) {
+ result = &extensionsv1beta1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
@@ -109,24 +113,24 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc
// ApplyScale takes top resource name and the apply declarative configuration for scale,
// applies it and returns the applied scale, and an error, if there is any.
-func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) {
+func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) {
if scale == nil {
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
- result = &v1beta1.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ result = &extensionsv1beta1.Scale{}
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("deployments").
Name(deploymentName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
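
Beyond the alias renames, the scale helpers above now mark their requests `UseProtobufAsDefault()`, and `ApplyScale` builds its request through `apply.NewRequest` (k8s.io/client-go/util/apply) instead of hand-marshalling the apply configuration to JSON, so the patch body follows the client's negotiated encoding. The caller-facing surface is unchanged; a minimal read-modify-update sketch (illustrative only, since extensions/v1beta1 Deployments are no longer served by current API servers):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// rescale reads and updates the scale subresource through the regenerated
// extensions/v1beta1 Deployment client, exercising GetScale and UpdateScale
// exactly as declared in DeploymentInterface above.
func rescale(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	sc, err := cs.ExtensionsV1beta1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	sc.Spec.Replicas = replicas
	updated, err := cs.ExtensionsV1beta1().Deployments(ns).UpdateScale(ctx, name, sc, metav1.UpdateOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("scaled %s/%s to %d replicas\n", ns, name, updated.Spec.Replicas)
	return nil
}
```

The replicaset.go change further down in this diff applies the same `apply.NewRequest` rewrite to its `ApplyScale` method.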
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
index 4725d2cd1..88f2279bb 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/extensions/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -105,10 +105,10 @@ func New(c rest.Interface) *ExtensionsV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := extensionsv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
index 4511c93fc..9a24621ea 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
+ applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type IngressesGetter interface {
// IngressInterface has methods to work with Ingress resources.
type IngressInterface interface {
- Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
- Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+ Create(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.CreateOptions) (*extensionsv1beta1.Ingress, error)
+ Update(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+ UpdateStatus(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Ingress, error)
+ List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.IngressList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
- Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Ingress, err error)
+ Apply(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
+ ApplyStatus(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error)
IngressExpansion
}
// ingresses implements IngressInterface
type ingresses struct {
- *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]
+ *gentype.ClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration]
}
// newIngresses returns a Ingresses
func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses {
return &ingresses{
- gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration](
+ gentype.NewClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration](
"ingresses",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Ingress { return &v1beta1.Ingress{} },
- func() *v1beta1.IngressList { return &v1beta1.IngressList{} }),
+ func() *extensionsv1beta1.Ingress { return &extensionsv1beta1.Ingress{} },
+ func() *extensionsv1beta1.IngressList { return &extensionsv1beta1.IngressList{} },
+ gentype.PrefersProtobuf[*extensionsv1beta1.Ingress](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
index afa8203c3..ce6a45a27 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
+ applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type NetworkPoliciesGetter interface {
// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
type NetworkPolicyInterface interface {
- Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error)
- Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error)
+ Create(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.CreateOptions) (*extensionsv1beta1.NetworkPolicy, error)
+ Update(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.UpdateOptions) (*extensionsv1beta1.NetworkPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.NetworkPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.NetworkPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error)
- Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.NetworkPolicy, err error)
+ Apply(ctx context.Context, networkPolicy *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.NetworkPolicy, err error)
NetworkPolicyExpansion
}
// networkPolicies implements NetworkPolicyInterface
type networkPolicies struct {
- *gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]
+ *gentype.ClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration]
}
// newNetworkPolicies returns a NetworkPolicies
func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies {
return &networkPolicies{
- gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration](
+ gentype.NewClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration](
"networkpolicies",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} },
- func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }),
+ func() *extensionsv1beta1.NetworkPolicy { return &extensionsv1beta1.NetworkPolicy{} },
+ func() *extensionsv1beta1.NetworkPolicyList { return &extensionsv1beta1.NetworkPolicyList{} },
+ gentype.PrefersProtobuf[*extensionsv1beta1.NetworkPolicy](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
index 8973948f3..f918be417 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
@@ -19,17 +19,17 @@ limitations under the License.
package v1beta1
import (
- "context"
- json "encoding/json"
- "fmt"
+ context "context"
+ fmt "fmt"
- v1beta1 "k8s.io/api/extensions/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
+ applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
+ apply "k8s.io/client-go/util/apply"
)
// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
@@ -40,48 +40,51 @@ type ReplicaSetsGetter interface {
// ReplicaSetInterface has methods to work with ReplicaSet resources.
type ReplicaSetInterface interface {
- Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error)
- Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
+ Create(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.CreateOptions) (*extensionsv1beta1.ReplicaSet, error)
+ Update(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
+ UpdateStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.ReplicaSet, error)
+ List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.ReplicaSetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error)
- Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.ReplicaSet, err error)
+ Apply(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error)
- GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
- UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
- ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error)
+ ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error)
+ GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error)
+ UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error)
+ ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error)
ReplicaSetExpansion
}
// replicaSets implements ReplicaSetInterface
type replicaSets struct {
- *gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration]
}
// newReplicaSets returns a ReplicaSets
func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets {
return &replicaSets{
- gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration](
"replicasets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} },
- func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }),
+ func() *extensionsv1beta1.ReplicaSet { return &extensionsv1beta1.ReplicaSet{} },
+ func() *extensionsv1beta1.ReplicaSetList { return &extensionsv1beta1.ReplicaSetList{} },
+ gentype.PrefersProtobuf[*extensionsv1beta1.ReplicaSet](),
+ ),
}
}
-// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any.
-func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
- result = &v1beta1.Scale{}
+// GetScale takes name of the replicaSet, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any.
+func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) {
+ result = &extensionsv1beta1.Scale{}
err = c.GetClient().Get().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
@@ -93,9 +96,10 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio
}
// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
- result = &v1beta1.Scale{}
+func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) {
+ result = &extensionsv1beta1.Scale{}
err = c.GetClient().Put().
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
@@ -109,24 +113,24 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc
// ApplyScale takes top resource name and the apply declarative configuration for scale,
// applies it and returns the applied scale, and an error, if there is any.
-func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) {
+func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) {
if scale == nil {
return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
}
patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(scale)
+ request, err := apply.NewRequest(c.GetClient(), scale)
if err != nil {
return nil, err
}
- result = &v1beta1.Scale{}
- err = c.GetClient().Patch(types.ApplyPatchType).
+ result = &extensionsv1beta1.Scale{}
+ err = request.
+ UseProtobufAsDefault().
Namespace(c.GetNamespace()).
Resource("replicasets").
Name(replicaSetName).
SubResource("scale").
VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
Do(ctx).
Into(result)
return
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go
index 3d7d93ef1..3b19586e9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/flowcontrol/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := flowcontrolv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
index 2606cee07..56d4d8065 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
+ applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type FlowSchemasGetter interface {
// FlowSchemaInterface has methods to work with FlowSchema resources.
type FlowSchemaInterface interface {
- Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error)
- Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error)
+ Create(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.CreateOptions) (*flowcontrolv1.FlowSchema, error)
+ Update(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error)
+ UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.FlowSchema, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.FlowSchemaList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error)
- Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.FlowSchema, err error)
+ Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error)
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error)
FlowSchemaExpansion
}
// flowSchemas implements FlowSchemaInterface
type flowSchemas struct {
- *gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration]
}
// newFlowSchemas returns a FlowSchemas
func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas {
return &flowSchemas{
- gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration](
"flowschemas",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.FlowSchema { return &v1.FlowSchema{} },
- func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }),
+ func() *flowcontrolv1.FlowSchema { return &flowcontrolv1.FlowSchema{} },
+ func() *flowcontrolv1.FlowSchemaList { return &flowcontrolv1.FlowSchemaList{} },
+ gentype.PrefersProtobuf[*flowcontrolv1.FlowSchema](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
index 64907af60..5d25f393a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/flowcontrol/v1"
+ flowcontrolv1 "k8s.io/api/flowcontrol/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
+ applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,40 @@ type PriorityLevelConfigurationsGetter interface {
// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources.
type PriorityLevelConfigurationInterface interface {
- Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error)
- Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error)
+ Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error)
+ Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error)
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.PriorityLevelConfiguration, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.PriorityLevelConfigurationList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error)
- Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.PriorityLevelConfiguration, err error)
+ Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error)
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error)
PriorityLevelConfigurationExpansion
}
// priorityLevelConfigurations implements PriorityLevelConfigurationInterface
type priorityLevelConfigurations struct {
- *gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration]
}
// newPriorityLevelConfigurations returns a PriorityLevelConfigurations
func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations {
return &priorityLevelConfigurations{
- gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration](
"prioritylevelconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} },
- func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }),
+ func() *flowcontrolv1.PriorityLevelConfiguration { return &flowcontrolv1.PriorityLevelConfiguration{} },
+ func() *flowcontrolv1.PriorityLevelConfigurationList {
+ return &flowcontrolv1.PriorityLevelConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*flowcontrolv1.PriorityLevelConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go
index c29cfca95..ac3f5ffe8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := flowcontrolv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
index 3c6805b9b..f0def3947 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
+ applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type FlowSchemasGetter interface {
// FlowSchemaInterface has methods to work with FlowSchema resources.
type FlowSchemaInterface interface {
- Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error)
- Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error)
+ Create(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta1.FlowSchema, error)
+ Update(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error)
+ UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.FlowSchema, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.FlowSchemaList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error)
- Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.FlowSchema, err error)
+ Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error)
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error)
FlowSchemaExpansion
}
// flowSchemas implements FlowSchemaInterface
type flowSchemas struct {
- *gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration]
}
// newFlowSchemas returns a FlowSchemas
func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas {
return &flowSchemas{
- gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration](
"flowschemas",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} },
- func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }),
+ func() *flowcontrolv1beta1.FlowSchema { return &flowcontrolv1beta1.FlowSchema{} },
+ func() *flowcontrolv1beta1.FlowSchemaList { return &flowcontrolv1beta1.FlowSchemaList{} },
+ gentype.PrefersProtobuf[*flowcontrolv1beta1.FlowSchema](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
index 049f4049d..15ee1b8b5 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
+ applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface {
// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources.
type PriorityLevelConfigurationInterface interface {
- Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error)
- Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error)
+ Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error)
+ Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error)
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.PriorityLevelConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error)
- Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error)
+ Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error)
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error)
PriorityLevelConfigurationExpansion
}
// priorityLevelConfigurations implements PriorityLevelConfigurationInterface
type priorityLevelConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]
}
// newPriorityLevelConfigurations returns a PriorityLevelConfigurations
func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations {
return &priorityLevelConfigurations{
- gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration](
"prioritylevelconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} },
- func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }),
+ func() *flowcontrolv1beta1.PriorityLevelConfiguration {
+ return &flowcontrolv1beta1.PriorityLevelConfiguration{}
+ },
+ func() *flowcontrolv1beta1.PriorityLevelConfigurationList {
+ return &flowcontrolv1beta1.PriorityLevelConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*flowcontrolv1beta1.PriorityLevelConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go
index f3cca4fc7..7652d4f39 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta2
import (
- "net/http"
+ http "net/http"
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
- "k8s.io/client-go/kubernetes/scheme"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta2Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta2.SchemeGroupVersion
+ gv := flowcontrolv1beta2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
index 270615762..780cf030e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
+ applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type FlowSchemasGetter interface {
// FlowSchemaInterface has methods to work with FlowSchema resources.
type FlowSchemaInterface interface {
- Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error)
- Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error)
+ Create(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta2.FlowSchema, error)
+ Update(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error)
+ UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.FlowSchema, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.FlowSchemaList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.FlowSchema, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.FlowSchemaList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error)
- Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.FlowSchema, err error)
+ Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error)
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error)
FlowSchemaExpansion
}
// flowSchemas implements FlowSchemaInterface
type flowSchemas struct {
- *gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration]
}
// newFlowSchemas returns a FlowSchemas
func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas {
return &flowSchemas{
- gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration](
"flowschemas",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} },
- func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }),
+ func() *flowcontrolv1beta2.FlowSchema { return &flowcontrolv1beta2.FlowSchema{} },
+ func() *flowcontrolv1beta2.FlowSchemaList { return &flowcontrolv1beta2.FlowSchemaList{} },
+ gentype.PrefersProtobuf[*flowcontrolv1beta2.FlowSchema](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
index 00ead4c60..65b9feafa 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta2
import (
- "context"
+ context "context"
- v1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
+ applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface {
// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources.
type PriorityLevelConfigurationInterface interface {
- Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error)
- Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error)
+ Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error)
+ Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error)
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PriorityLevelConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PriorityLevelConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.PriorityLevelConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error)
- Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error)
+ Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error)
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error)
PriorityLevelConfigurationExpansion
}
// priorityLevelConfigurations implements PriorityLevelConfigurationInterface
type priorityLevelConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]
}
// newPriorityLevelConfigurations returns a PriorityLevelConfigurations
func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations {
return &priorityLevelConfigurations{
- gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration](
"prioritylevelconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} },
- func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }),
+ func() *flowcontrolv1beta2.PriorityLevelConfiguration {
+ return &flowcontrolv1beta2.PriorityLevelConfiguration{}
+ },
+ func() *flowcontrolv1beta2.PriorityLevelConfigurationList {
+ return &flowcontrolv1beta2.PriorityLevelConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*flowcontrolv1beta2.PriorityLevelConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go
index 461120bd3..b32dc911c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta3
import (
- "net/http"
+ http "net/http"
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
- "k8s.io/client-go/kubernetes/scheme"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta3Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta3.SchemeGroupVersion
+ gv := flowcontrolv1beta3.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
index 35f600cdf..1e0d9feb1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- "context"
+ context "context"
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
+ applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type FlowSchemasGetter interface {
// FlowSchemaInterface has methods to work with FlowSchema resources.
type FlowSchemaInterface interface {
- Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error)
- Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error)
+ Create(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta3.FlowSchema, error)
+ Update(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error)
+ UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.FlowSchema, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta3.FlowSchemaList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.FlowSchema, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.FlowSchemaList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error)
- Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.FlowSchema, err error)
+ Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error)
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error)
FlowSchemaExpansion
}
// flowSchemas implements FlowSchemaInterface
type flowSchemas struct {
- *gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration]
}
// newFlowSchemas returns a FlowSchemas
func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas {
return &flowSchemas{
- gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration](
"flowschemas",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} },
- func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }),
+ func() *flowcontrolv1beta3.FlowSchema { return &flowcontrolv1beta3.FlowSchema{} },
+ func() *flowcontrolv1beta3.FlowSchemaList { return &flowcontrolv1beta3.FlowSchemaList{} },
+ gentype.PrefersProtobuf[*flowcontrolv1beta3.FlowSchema](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
index 93842e0cf..91bbf3fb1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta3
import (
- "context"
+ context "context"
- v1beta3 "k8s.io/api/flowcontrol/v1beta3"
+ flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
+ applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type PriorityLevelConfigurationsGetter interface {
// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources.
type PriorityLevelConfigurationInterface interface {
- Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error)
- Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error)
+ Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error)
+ Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error)
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.PriorityLevelConfiguration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta3.PriorityLevelConfigurationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.PriorityLevelConfigurationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error)
- Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error)
+ Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error)
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error)
PriorityLevelConfigurationExpansion
}
// priorityLevelConfigurations implements PriorityLevelConfigurationInterface
type priorityLevelConfigurations struct {
- *gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]
}
// newPriorityLevelConfigurations returns a PriorityLevelConfigurations
func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations {
return &priorityLevelConfigurations{
- gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration](
"prioritylevelconfigurations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} },
- func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }),
+ func() *flowcontrolv1beta3.PriorityLevelConfiguration {
+ return &flowcontrolv1beta3.PriorityLevelConfiguration{}
+ },
+ func() *flowcontrolv1beta3.PriorityLevelConfigurationList {
+ return &flowcontrolv1beta3.PriorityLevelConfigurationList{}
+ },
+ gentype.PrefersProtobuf[*flowcontrolv1beta3.PriorityLevelConfiguration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
index afaff4912..76d416249 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/networking/v1"
+ networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
+ applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type IngressesGetter interface {
// IngressInterface has methods to work with Ingress resources.
type IngressInterface interface {
- Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error)
- Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ Create(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.CreateOptions) (*networkingv1.Ingress, error)
+ Update(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ UpdateStatus(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.Ingress, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error)
- Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.Ingress, err error)
+ Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
+ ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error)
IngressExpansion
}
// ingresses implements IngressInterface
type ingresses struct {
- *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration]
}
// newIngresses returns a Ingresses
func newIngresses(c *NetworkingV1Client, namespace string) *ingresses {
return &ingresses{
- gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration](
"ingresses",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Ingress { return &v1.Ingress{} },
- func() *v1.IngressList { return &v1.IngressList{} }),
+ func() *networkingv1.Ingress { return &networkingv1.Ingress{} },
+ func() *networkingv1.IngressList { return &networkingv1.IngressList{} },
+ gentype.PrefersProtobuf[*networkingv1.Ingress](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
index 3301e8799..3bbbf9e15 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/networking/v1"
+ networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
+ applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type IngressClassesGetter interface {
// IngressClassInterface has methods to work with IngressClass resources.
type IngressClassInterface interface {
- Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (*v1.IngressClass, error)
- Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (*v1.IngressClass, error)
+ Create(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.CreateOptions) (*networkingv1.IngressClass, error)
+ Update(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.UpdateOptions) (*networkingv1.IngressClass, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IngressClass, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressClassList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.IngressClass, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressClassList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error)
- Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.IngressClass, err error)
+ Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.IngressClass, err error)
IngressClassExpansion
}
// ingressClasses implements IngressClassInterface
type ingressClasses struct {
- *gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration]
}
// newIngressClasses returns a IngressClasses
func newIngressClasses(c *NetworkingV1Client) *ingressClasses {
return &ingressClasses{
- gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration](
"ingressclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.IngressClass { return &v1.IngressClass{} },
- func() *v1.IngressClassList { return &v1.IngressClassList{} }),
+ func() *networkingv1.IngressClass { return &networkingv1.IngressClass{} },
+ func() *networkingv1.IngressClassList { return &networkingv1.IngressClassList{} },
+ gentype.PrefersProtobuf[*networkingv1.IngressClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
index 3b72a7ae9..692b52f02 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/networking/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ networkingv1 "k8s.io/api/networking/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -95,10 +95,10 @@ func New(c rest.Interface) *NetworkingV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := networkingv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
index ba2ef32db..2758c2bfb 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/networking/v1"
+ networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
+ applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type NetworkPoliciesGetter interface {
// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
type NetworkPolicyInterface interface {
- Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error)
- Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error)
+ Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.CreateOptions) (*networkingv1.NetworkPolicy, error)
+ Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.UpdateOptions) (*networkingv1.NetworkPolicy, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.NetworkPolicy, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.NetworkPolicyList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error)
- Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error)
+ Apply(ctx context.Context, networkPolicy *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.NetworkPolicy, err error)
NetworkPolicyExpansion
}
// networkPolicies implements NetworkPolicyInterface
type networkPolicies struct {
- *gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration]
}
// newNetworkPolicies returns a NetworkPolicies
func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
return &networkPolicies{
- gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration](
"networkpolicies",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} },
- func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }),
+ func() *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{} },
+ func() *networkingv1.NetworkPolicyList { return &networkingv1.NetworkPolicyList{} },
+ gentype.PrefersProtobuf[*networkingv1.NetworkPolicy](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
index 33e90d18a..e96a564ab 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/networking/v1alpha1"
+ networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
+ applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type IPAddressesGetter interface {
// IPAddressInterface has methods to work with IPAddress resources.
type IPAddressInterface interface {
- Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error)
- Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error)
+ Create(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.CreateOptions) (*networkingv1alpha1.IPAddress, error)
+ Update(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.UpdateOptions) (*networkingv1alpha1.IPAddress, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.IPAddress, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.IPAddressList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error)
- Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.IPAddress, err error)
+ Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.IPAddress, err error)
IPAddressExpansion
}
// iPAddresses implements IPAddressInterface
type iPAddresses struct {
- *gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration]
}
// newIPAddresses returns a IPAddresses
func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses {
return &iPAddresses{
- gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration](
"ipaddresses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} },
- func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }),
+ func() *networkingv1alpha1.IPAddress { return &networkingv1alpha1.IPAddress{} },
+ func() *networkingv1alpha1.IPAddressList { return &networkingv1alpha1.IPAddressList{} },
+ gentype.PrefersProtobuf[*networkingv1alpha1.IPAddress](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
index c730e6246..9e1b3064d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/networking/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *NetworkingV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := networkingv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
index b72fe5b69..38cc26010 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/networking/v1alpha1"
+ networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
+ applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type ServiceCIDRsGetter interface {
// ServiceCIDRInterface has methods to work with ServiceCIDR resources.
type ServiceCIDRInterface interface {
- Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error)
- Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
+ Create(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1alpha1.ServiceCIDR, error)
+ Update(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
+ UpdateStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.ServiceCIDR, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.ServiceCIDRList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error)
- Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.ServiceCIDR, err error)
+ Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
+ ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error)
ServiceCIDRExpansion
}
// serviceCIDRs implements ServiceCIDRInterface
type serviceCIDRs struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration]
}
// newServiceCIDRs returns a ServiceCIDRs
func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs {
return &serviceCIDRs{
- gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration](
"servicecidrs",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} },
- func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }),
+ func() *networkingv1alpha1.ServiceCIDR { return &networkingv1alpha1.ServiceCIDR{} },
+ func() *networkingv1alpha1.ServiceCIDRList { return &networkingv1alpha1.ServiceCIDRList{} },
+ gentype.PrefersProtobuf[*networkingv1alpha1.ServiceCIDR](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
index 90be275ad..b2d5aa2ce 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/networking/v1beta1"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
+ applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type IngressesGetter interface {
// IngressInterface has methods to work with Ingress resources.
type IngressInterface interface {
- Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
- Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+ Create(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.CreateOptions) (*networkingv1beta1.Ingress, error)
+ Update(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+ UpdateStatus(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.Ingress, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
- Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.Ingress, err error)
+ Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error)
+ ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error)
IngressExpansion
}
// ingresses implements IngressInterface
type ingresses struct {
- *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration]
}
// newIngresses returns a Ingresses
func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses {
return &ingresses{
- gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration](
"ingresses",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Ingress { return &v1beta1.Ingress{} },
- func() *v1beta1.IngressList { return &v1beta1.IngressList{} }),
+ func() *networkingv1beta1.Ingress { return &networkingv1beta1.Ingress{} },
+ func() *networkingv1beta1.IngressList { return &networkingv1beta1.IngressList{} },
+ gentype.PrefersProtobuf[*networkingv1beta1.Ingress](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
index c55da4168..dd37fc5cd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/networking/v1beta1"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
+ applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type IngressClassesGetter interface {
// IngressClassInterface has methods to work with IngressClass resources.
type IngressClassInterface interface {
- Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (*v1beta1.IngressClass, error)
- Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (*v1beta1.IngressClass, error)
+ Create(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.CreateOptions) (*networkingv1beta1.IngressClass, error)
+ Update(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.UpdateOptions) (*networkingv1beta1.IngressClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IngressClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IngressClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error)
- Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IngressClass, err error)
+ Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IngressClass, err error)
IngressClassExpansion
}
// ingressClasses implements IngressClassInterface
type ingressClasses struct {
- *gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration]
}
// newIngressClasses returns a IngressClasses
func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses {
return &ingressClasses{
- gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration](
"ingressclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} },
- func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }),
+ func() *networkingv1beta1.IngressClass { return &networkingv1beta1.IngressClass{} },
+ func() *networkingv1beta1.IngressClassList { return &networkingv1beta1.IngressClassList{} },
+ gentype.PrefersProtobuf[*networkingv1beta1.IngressClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go
index 09e4139e7..0b7ffff72 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/networking/v1beta1"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
+ applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type IPAddressesGetter interface {
// IPAddressInterface has methods to work with IPAddress resources.
type IPAddressInterface interface {
- Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error)
- Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error)
+ Create(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.CreateOptions) (*networkingv1beta1.IPAddress, error)
+ Update(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.UpdateOptions) (*networkingv1beta1.IPAddress, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IPAddress, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IPAddressList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error)
- Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IPAddress, err error)
+ Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IPAddress, err error)
IPAddressExpansion
}
// iPAddresses implements IPAddressInterface
type iPAddresses struct {
- *gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration]
}
// newIPAddresses returns a IPAddresses
func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses {
return &iPAddresses{
- gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration](
"ipaddresses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} },
- func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }),
+ func() *networkingv1beta1.IPAddress { return &networkingv1beta1.IPAddress{} },
+ func() *networkingv1beta1.IPAddressList { return &networkingv1beta1.IPAddressList{} },
+ gentype.PrefersProtobuf[*networkingv1beta1.IPAddress](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
index d35225abd..cb4b0c601 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/networking/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *NetworkingV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := networkingv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go
index d3336f2ec..6ad1daf74 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/networking/v1beta1"
+ networkingv1beta1 "k8s.io/api/networking/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
+ applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type ServiceCIDRsGetter interface {
// ServiceCIDRInterface has methods to work with ServiceCIDR resources.
type ServiceCIDRInterface interface {
- Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error)
- Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error)
+ Create(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1beta1.ServiceCIDR, error)
+ Update(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error)
+ UpdateStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.ServiceCIDR, error)
+ List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.ServiceCIDRList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error)
- Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.ServiceCIDR, err error)
+ Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error)
+ ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error)
ServiceCIDRExpansion
}
// serviceCIDRs implements ServiceCIDRInterface
type serviceCIDRs struct {
- *gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]
+ *gentype.ClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration]
}
// newServiceCIDRs returns a ServiceCIDRs
func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs {
return &serviceCIDRs{
- gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration](
+ gentype.NewClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration](
"servicecidrs",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} },
- func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }),
+ func() *networkingv1beta1.ServiceCIDR { return &networkingv1beta1.ServiceCIDR{} },
+ func() *networkingv1beta1.ServiceCIDRList { return &networkingv1beta1.ServiceCIDRList{} },
+ gentype.PrefersProtobuf[*networkingv1beta1.ServiceCIDR](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go
index 844f9fc70..3bde21171 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/node/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ nodev1 "k8s.io/api/node/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := nodev1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
index 6c8110640..77311fab7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/node/v1"
+ nodev1 "k8s.io/api/node/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- nodev1 "k8s.io/client-go/applyconfigurations/node/v1"
+ applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RuntimeClassesGetter interface {
// RuntimeClassInterface has methods to work with RuntimeClass resources.
type RuntimeClassInterface interface {
- Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error)
- Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error)
+ Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.CreateOptions) (*nodev1.RuntimeClass, error)
+ Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.UpdateOptions) (*nodev1.RuntimeClass, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*nodev1.RuntimeClass, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*nodev1.RuntimeClassList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error)
- Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error)
+ Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *nodev1.RuntimeClass, err error)
RuntimeClassExpansion
}
// runtimeClasses implements RuntimeClassInterface
type runtimeClasses struct {
- *gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration]
}
// newRuntimeClasses returns a RuntimeClasses
func newRuntimeClasses(c *NodeV1Client) *runtimeClasses {
return &runtimeClasses{
- gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration](
"runtimeclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.RuntimeClass { return &v1.RuntimeClass{} },
- func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }),
+ func() *nodev1.RuntimeClass { return &nodev1.RuntimeClass{} },
+ func() *nodev1.RuntimeClassList { return &nodev1.RuntimeClassList{} },
+ gentype.PrefersProtobuf[*nodev1.RuntimeClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go
index 2a197d58e..e47ef3548 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/node/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ nodev1alpha1 "k8s.io/api/node/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := nodev1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
index 60aa4a213..f9da4f07b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/node/v1alpha1"
+ nodev1alpha1 "k8s.io/api/node/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
+ applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RuntimeClassesGetter interface {
// RuntimeClassInterface has methods to work with RuntimeClass resources.
type RuntimeClassInterface interface {
- Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error)
- Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error)
+ Create(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.CreateOptions) (*nodev1alpha1.RuntimeClass, error)
+ Update(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.UpdateOptions) (*nodev1alpha1.RuntimeClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1alpha1.RuntimeClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*nodev1alpha1.RuntimeClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error)
- Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1alpha1.RuntimeClass, err error)
+ Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1alpha1.RuntimeClass, err error)
RuntimeClassExpansion
}
// runtimeClasses implements RuntimeClassInterface
type runtimeClasses struct {
- *gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration]
}
// newRuntimeClasses returns a RuntimeClasses
func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses {
return &runtimeClasses{
- gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration](
"runtimeclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} },
- func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }),
+ func() *nodev1alpha1.RuntimeClass { return &nodev1alpha1.RuntimeClass{} },
+ func() *nodev1alpha1.RuntimeClassList { return &nodev1alpha1.RuntimeClassList{} },
+ gentype.PrefersProtobuf[*nodev1alpha1.RuntimeClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go
index 4f6802ffa..c7864a479 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/node/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ nodev1beta1 "k8s.io/api/node/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := nodev1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
index 8e15d5288..18089defd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/node/v1beta1"
+ nodev1beta1 "k8s.io/api/node/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1"
+ applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RuntimeClassesGetter interface {
// RuntimeClassInterface has methods to work with RuntimeClass resources.
type RuntimeClassInterface interface {
- Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error)
- Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error)
+ Create(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.CreateOptions) (*nodev1beta1.RuntimeClass, error)
+ Update(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.UpdateOptions) (*nodev1beta1.RuntimeClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1beta1.RuntimeClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*nodev1beta1.RuntimeClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error)
- Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1beta1.RuntimeClass, err error)
+ Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1beta1.RuntimeClass, err error)
RuntimeClassExpansion
}
// runtimeClasses implements RuntimeClassInterface
type runtimeClasses struct {
- *gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration]
}
// newRuntimeClasses returns a RuntimeClasses
func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses {
return &runtimeClasses{
- gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration](
"runtimeclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} },
- func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }),
+ func() *nodev1beta1.RuntimeClass { return &nodev1beta1.RuntimeClass{} },
+ func() *nodev1beta1.RuntimeClassList { return &nodev1beta1.RuntimeClassList{} },
+ gentype.PrefersProtobuf[*nodev1beta1.RuntimeClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
index 22173d36d..6cf60806a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1
import (
- v1 "k8s.io/api/policy/v1"
+ policyv1 "k8s.io/api/policy/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -37,17 +37,19 @@ type EvictionInterface interface {
// evictions implements EvictionInterface
type evictions struct {
- *gentype.Client[*v1.Eviction]
+ *gentype.Client[*policyv1.Eviction]
}
// newEvictions returns a Evictions
func newEvictions(c *PolicyV1Client, namespace string) *evictions {
return &evictions{
- gentype.NewClient[*v1.Eviction](
+ gentype.NewClient[*policyv1.Eviction](
"evictions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Eviction { return &v1.Eviction{} }),
+ func() *policyv1.Eviction { return &policyv1.Eviction{} },
+ gentype.PrefersProtobuf[*policyv1.Eviction](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
index 6d011cbce..d45ed21f5 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/policy/v1"
+ policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- policyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
+ applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type PodDisruptionBudgetsGetter interface {
// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources.
type PodDisruptionBudgetInterface interface {
- Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error)
- Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error)
+ Create(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.CreateOptions) (*policyv1.PodDisruptionBudget, error)
+ Update(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error)
+ UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodDisruptionBudget, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PodDisruptionBudgetList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*policyv1.PodDisruptionBudget, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*policyv1.PodDisruptionBudgetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error)
- Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *policyv1.PodDisruptionBudget, err error)
+ Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error)
+ ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error)
PodDisruptionBudgetExpansion
}
// podDisruptionBudgets implements PodDisruptionBudgetInterface
type podDisruptionBudgets struct {
- *gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration]
}
// newPodDisruptionBudgets returns a PodDisruptionBudgets
func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets {
return &podDisruptionBudgets{
- gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration](
"poddisruptionbudgets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} },
- func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }),
+ func() *policyv1.PodDisruptionBudget { return &policyv1.PodDisruptionBudget{} },
+ func() *policyv1.PodDisruptionBudgetList { return &policyv1.PodDisruptionBudgetList{} },
+ gentype.PrefersProtobuf[*policyv1.PodDisruptionBudget](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
index 9bfd98aa9..8d84f460b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/policy/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ policyv1 "k8s.io/api/policy/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := policyv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
index e003ece6b..de4c35e76 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go
@@ -19,7 +19,7 @@ limitations under the License.
package v1beta1
import (
- v1beta1 "k8s.io/api/policy/v1beta1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -37,17 +37,19 @@ type EvictionInterface interface {
// evictions implements EvictionInterface
type evictions struct {
- *gentype.Client[*v1beta1.Eviction]
+ *gentype.Client[*policyv1beta1.Eviction]
}
// newEvictions returns a Evictions
func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions {
return &evictions{
- gentype.NewClient[*v1beta1.Eviction](
+ gentype.NewClient[*policyv1beta1.Eviction](
"evictions",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Eviction { return &v1beta1.Eviction{} }),
+ func() *policyv1beta1.Eviction { return &policyv1beta1.Eviction{} },
+ gentype.PrefersProtobuf[*policyv1beta1.Eviction](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
index 411181237..00e044961 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/policy/v1beta1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1"
+ applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type PodDisruptionBudgetsGetter interface {
// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources.
type PodDisruptionBudgetInterface interface {
- Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error)
- Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
+ Create(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*policyv1beta1.PodDisruptionBudget, error)
+ Update(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
+ UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*policyv1beta1.PodDisruptionBudget, error)
+ List(ctx context.Context, opts v1.ListOptions) (*policyv1beta1.PodDisruptionBudgetList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error)
- Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *policyv1beta1.PodDisruptionBudget, err error)
+ Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error)
+ ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error)
PodDisruptionBudgetExpansion
}
// podDisruptionBudgets implements PodDisruptionBudgetInterface
type podDisruptionBudgets struct {
- *gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]
+ *gentype.ClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration]
}
// newPodDisruptionBudgets returns a PodDisruptionBudgets
func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets {
return &podDisruptionBudgets{
- gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration](
+ gentype.NewClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration](
"poddisruptionbudgets",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} },
- func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }),
+ func() *policyv1beta1.PodDisruptionBudget { return &policyv1beta1.PodDisruptionBudget{} },
+ func() *policyv1beta1.PodDisruptionBudgetList { return &policyv1beta1.PodDisruptionBudgetList{} },
+ gentype.PrefersProtobuf[*policyv1beta1.PodDisruptionBudget](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
index fdb509321..d8e78627e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/policy/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := policyv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
index 19fff0ee4..cccad0487 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/rbac/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
+ applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRolesGetter interface {
// ClusterRoleInterface has methods to work with ClusterRole resources.
type ClusterRoleInterface interface {
- Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error)
- Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error)
+ Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.CreateOptions) (*rbacv1.ClusterRole, error)
+ Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.UpdateOptions) (*rbacv1.ClusterRole, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRole, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error)
- Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error)
+ Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRole, err error)
ClusterRoleExpansion
}
// clusterRoles implements ClusterRoleInterface
type clusterRoles struct {
- *gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration]
}
// newClusterRoles returns a ClusterRoles
func newClusterRoles(c *RbacV1Client) *clusterRoles {
return &clusterRoles{
- gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration](
"clusterroles",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ClusterRole { return &v1.ClusterRole{} },
- func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }),
+ func() *rbacv1.ClusterRole { return &rbacv1.ClusterRole{} },
+ func() *rbacv1.ClusterRoleList { return &rbacv1.ClusterRoleList{} },
+ gentype.PrefersProtobuf[*rbacv1.ClusterRole](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
index 77fb3785e..4a75fdcb5 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/rbac/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
+ applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface {
// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
type ClusterRoleBindingInterface interface {
- Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error)
- Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error)
+ Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.CreateOptions) (*rbacv1.ClusterRoleBinding, error)
+ Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.UpdateOptions) (*rbacv1.ClusterRoleBinding, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRoleBinding, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleBindingList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error)
- Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error)
+ Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRoleBinding, err error)
ClusterRoleBindingExpansion
}
// clusterRoleBindings implements ClusterRoleBindingInterface
type clusterRoleBindings struct {
- *gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration]
}
// newClusterRoleBindings returns a ClusterRoleBindings
func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings {
return &clusterRoleBindings{
- gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration](
"clusterrolebindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} },
- func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }),
+ func() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{} },
+ func() *rbacv1.ClusterRoleBindingList { return &rbacv1.ClusterRoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1.ClusterRoleBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
index a02f0357d..c586ee638 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/rbac/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ rbacv1 "k8s.io/api/rbac/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := rbacv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
index b75b055f0..c3a9ba135 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/rbac/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
+ applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RolesGetter interface {
// RoleInterface has methods to work with Role resources.
type RoleInterface interface {
- Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error)
- Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error)
+ Create(ctx context.Context, role *rbacv1.Role, opts metav1.CreateOptions) (*rbacv1.Role, error)
+ Update(ctx context.Context, role *rbacv1.Role, opts metav1.UpdateOptions) (*rbacv1.Role, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.Role, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error)
- Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error)
+ Apply(ctx context.Context, role *applyconfigurationsrbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.Role, err error)
RoleExpansion
}
// roles implements RoleInterface
type roles struct {
- *gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration]
}
// newRoles returns a Roles
func newRoles(c *RbacV1Client, namespace string) *roles {
return &roles{
- gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration](
"roles",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.Role { return &v1.Role{} },
- func() *v1.RoleList { return &v1.RoleList{} }),
+ func() *rbacv1.Role { return &rbacv1.Role{} },
+ func() *rbacv1.RoleList { return &rbacv1.RoleList{} },
+ gentype.PrefersProtobuf[*rbacv1.Role](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
index fcbb1c0e2..1f5a39490 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/rbac/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
+ applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RoleBindingsGetter interface {
// RoleBindingInterface has methods to work with RoleBinding resources.
type RoleBindingInterface interface {
- Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error)
- Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error)
+ Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.CreateOptions) (*rbacv1.RoleBinding, error)
+ Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.UpdateOptions) (*rbacv1.RoleBinding, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.RoleBinding, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleBindingList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error)
- Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error)
+ Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.RoleBinding, err error)
RoleBindingExpansion
}
// roleBindings implements RoleBindingInterface
type roleBindings struct {
- *gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration]
}
// newRoleBindings returns a RoleBindings
func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings {
return &roleBindings{
- gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration](
"rolebindings",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.RoleBinding { return &v1.RoleBinding{} },
- func() *v1.RoleBindingList { return &v1.RoleBindingList{} }),
+ func() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{} },
+ func() *rbacv1.RoleBindingList { return &rbacv1.RoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1.RoleBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
index f91e2c50a..3874f9dee 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
+ applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRolesGetter interface {
// ClusterRoleInterface has methods to work with ClusterRole resources.
type ClusterRoleInterface interface {
- Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error)
- Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error)
+ Create(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRole, error)
+ Update(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRole, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRole, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error)
- Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRole, err error)
+ Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRole, err error)
ClusterRoleExpansion
}
// clusterRoles implements ClusterRoleInterface
type clusterRoles struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration]
}
// newClusterRoles returns a ClusterRoles
func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles {
return &clusterRoles{
- gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration](
"clusterroles",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} },
- func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }),
+ func() *rbacv1alpha1.ClusterRole { return &rbacv1alpha1.ClusterRole{} },
+ func() *rbacv1alpha1.ClusterRoleList { return &rbacv1alpha1.ClusterRoleList{} },
+ gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRole](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
index 3f04526f0..434f875f8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
+ applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface {
// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
type ClusterRoleBindingInterface interface {
- Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error)
- Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error)
+ Create(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRoleBinding, error)
+ Update(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRoleBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRoleBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
- Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRoleBinding, err error)
+ Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRoleBinding, err error)
ClusterRoleBindingExpansion
}
// clusterRoleBindings implements ClusterRoleBindingInterface
type clusterRoleBindings struct {
- *gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration]
}
// newClusterRoleBindings returns a ClusterRoleBindings
func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings {
return &clusterRoleBindings{
- gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration](
"clusterrolebindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} },
- func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }),
+ func() *rbacv1alpha1.ClusterRoleBinding { return &rbacv1alpha1.ClusterRoleBinding{} },
+ func() *rbacv1alpha1.ClusterRoleBindingList { return &rbacv1alpha1.ClusterRoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRoleBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
index cc5b309e9..df46fc3aa 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/rbac/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := rbacv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
index 4a1876a7d..3a47f673e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
+ applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RolesGetter interface {
// RoleInterface has methods to work with Role resources.
type RoleInterface interface {
- Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error)
- Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error)
+ Create(ctx context.Context, role *rbacv1alpha1.Role, opts v1.CreateOptions) (*rbacv1alpha1.Role, error)
+ Update(ctx context.Context, role *rbacv1alpha1.Role, opts v1.UpdateOptions) (*rbacv1alpha1.Role, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.Role, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error)
- Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.Role, err error)
+ Apply(ctx context.Context, role *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.Role, err error)
RoleExpansion
}
// roles implements RoleInterface
type roles struct {
- *gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration]
}
// newRoles returns a Roles
func newRoles(c *RbacV1alpha1Client, namespace string) *roles {
return &roles{
- gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration](
"roles",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha1.Role { return &v1alpha1.Role{} },
- func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }),
+ func() *rbacv1alpha1.Role { return &rbacv1alpha1.Role{} },
+ func() *rbacv1alpha1.RoleList { return &rbacv1alpha1.RoleList{} },
+ gentype.PrefersProtobuf[*rbacv1alpha1.Role](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
index 6473132f1..a6293171d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
+ applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RoleBindingsGetter interface {
// RoleBindingInterface has methods to work with RoleBinding resources.
type RoleBindingInterface interface {
- Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error)
- Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error)
+ Create(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.RoleBinding, error)
+ Update(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.RoleBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.RoleBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error)
- Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.RoleBinding, err error)
+ Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.RoleBinding, err error)
RoleBindingExpansion
}
// roleBindings implements RoleBindingInterface
type roleBindings struct {
- *gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1alpha1.RoleBinding, *rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration]
}
// newRoleBindings returns a RoleBindings
func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
return &roleBindings{
- gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1alpha1.RoleBinding, *rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration](
"rolebindings",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} },
- func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }),
+ func() *rbacv1alpha1.RoleBinding { return &rbacv1alpha1.RoleBinding{} },
+ func() *rbacv1alpha1.RoleBindingList { return &rbacv1alpha1.RoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1alpha1.RoleBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
index ed398333a..92388f2f1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/rbac/v1beta1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
+ applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRolesGetter interface {
// ClusterRoleInterface has methods to work with ClusterRole resources.
type ClusterRoleInterface interface {
- Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error)
- Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error)
+ Create(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.CreateOptions) (*rbacv1beta1.ClusterRole, error)
+ Update(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRole, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRole, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error)
- Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRole, err error)
+ Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRole, err error)
ClusterRoleExpansion
}
// clusterRoles implements ClusterRoleInterface
type clusterRoles struct {
- *gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRole, *rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration]
}
// newClusterRoles returns a ClusterRoles
func newClusterRoles(c *RbacV1beta1Client) *clusterRoles {
return &clusterRoles{
- gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRole, *rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration](
"clusterroles",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} },
- func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }),
+ func() *rbacv1beta1.ClusterRole { return &rbacv1beta1.ClusterRole{} },
+ func() *rbacv1beta1.ClusterRoleList { return &rbacv1beta1.ClusterRoleList{} },
+ gentype.PrefersProtobuf[*rbacv1beta1.ClusterRole](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
index 3010a99ae..beb50f7b7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/rbac/v1beta1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
+ applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ClusterRoleBindingsGetter interface {
// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
type ClusterRoleBindingInterface interface {
- Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error)
- Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error)
+ Create(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1beta1.ClusterRoleBinding, error)
+ Update(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRoleBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRoleBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
- Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRoleBinding, err error)
+ Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRoleBinding, err error)
ClusterRoleBindingExpansion
}
// clusterRoleBindings implements ClusterRoleBindingInterface
type clusterRoleBindings struct {
- *gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration]
}
// newClusterRoleBindings returns a ClusterRoleBindings
func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings {
return &clusterRoleBindings{
- gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration](
"clusterrolebindings",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} },
- func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }),
+ func() *rbacv1beta1.ClusterRoleBinding { return &rbacv1beta1.ClusterRoleBinding{} },
+ func() *rbacv1beta1.ClusterRoleBindingList { return &rbacv1beta1.ClusterRoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1beta1.ClusterRoleBinding](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
index 8dac5c1d4..5739bb289 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/rbac/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := rbacv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
index 92e51da1b..700fc6d22 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/rbac/v1beta1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
+ applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RolesGetter interface {
// RoleInterface has methods to work with Role resources.
type RoleInterface interface {
- Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error)
- Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error)
+ Create(ctx context.Context, role *rbacv1beta1.Role, opts v1.CreateOptions) (*rbacv1beta1.Role, error)
+ Update(ctx context.Context, role *rbacv1beta1.Role, opts v1.UpdateOptions) (*rbacv1beta1.Role, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.Role, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error)
- Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.Role, err error)
+ Apply(ctx context.Context, role *applyconfigurationsrbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.Role, err error)
RoleExpansion
}
// roles implements RoleInterface
type roles struct {
- *gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration]
}
// newRoles returns a Roles
func newRoles(c *RbacV1beta1Client, namespace string) *roles {
return &roles{
- gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration](
"roles",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.Role { return &v1beta1.Role{} },
- func() *v1beta1.RoleList { return &v1beta1.RoleList{} }),
+ func() *rbacv1beta1.Role { return &rbacv1beta1.Role{} },
+ func() *rbacv1beta1.RoleList { return &rbacv1beta1.RoleList{} },
+ gentype.PrefersProtobuf[*rbacv1beta1.Role](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
index ad31bd051..0f423a0d9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/rbac/v1beta1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
+ applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type RoleBindingsGetter interface {
// RoleBindingInterface has methods to work with RoleBinding resources.
type RoleBindingInterface interface {
- Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error)
- Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error)
+ Create(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.CreateOptions) (*rbacv1beta1.RoleBinding, error)
+ Update(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.RoleBinding, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.RoleBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleBindingList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error)
- Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.RoleBinding, err error)
+ Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.RoleBinding, err error)
RoleBindingExpansion
}
// roleBindings implements RoleBindingInterface
type roleBindings struct {
- *gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]
+ *gentype.ClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration]
}
// newRoleBindings returns a RoleBindings
func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
return &roleBindings{
- gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration](
+ gentype.NewClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration](
"rolebindings",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} },
- func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }),
+ func() *rbacv1beta1.RoleBinding { return &rbacv1beta1.RoleBinding{} },
+ func() *rbacv1beta1.RoleBindingList { return &rbacv1beta1.RoleBindingList{} },
+ gentype.PrefersProtobuf[*rbacv1beta1.RoleBinding](),
+ ),
}
}
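For callers, these regenerated rbac/v1beta1 clients keep the same exported method signatures; only the internal import aliases change and a gentype.PrefersProtobuf option is added, so existing code compiles unchanged while the client can prefer protobuf on the wire. A minimal sketch of listing RoleBindings through the regenerated client (assuming an in-cluster config; the namespace is just an example):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        // Assumes the process runs inside a cluster; out-of-cluster callers
        // would build the config with clientcmd instead.
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Same call shape as before the regeneration; the content-type
        // preference is handled inside the generated client.
        rbs, err := cs.RbacV1beta1().RoleBindings("kube-system").List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("rolebindings:", len(rbs.Items))
    }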
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
index 35455dfa3..6cdf57c53 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha3
import (
- "context"
+ context "context"
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
+ applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type DeviceClassesGetter interface {
// DeviceClassInterface has methods to work with DeviceClass resources.
type DeviceClassInterface interface {
- Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error)
- Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error)
+ Create(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.CreateOptions) (*resourcev1alpha3.DeviceClass, error)
+ Update(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.UpdateOptions) (*resourcev1alpha3.DeviceClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.DeviceClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.DeviceClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error)
- Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.DeviceClass, err error)
+ Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.DeviceClass, err error)
DeviceClassExpansion
}
// deviceClasses implements DeviceClassInterface
type deviceClasses struct {
- *gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration]
}
// newDeviceClasses returns a DeviceClasses
func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses {
return &deviceClasses{
- gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration](
"deviceclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} },
- func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }),
+ func() *resourcev1alpha3.DeviceClass { return &resourcev1alpha3.DeviceClass{} },
+ func() *resourcev1alpha3.DeviceClassList { return &resourcev1alpha3.DeviceClassList{} },
+ gentype.PrefersProtobuf[*resourcev1alpha3.DeviceClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go
index 747e564b7..cd8862ea8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go
@@ -20,8 +20,6 @@ package v1alpha3
type DeviceClassExpansion interface{}
-type PodSchedulingContextExpansion interface{}
-
type ResourceClaimExpansion interface{}
type ResourceClaimTemplateExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go
deleted file mode 100644
index af5984321..000000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- "context"
-
- v1alpha3 "k8s.io/api/resource/v1alpha3"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
- gentype "k8s.io/client-go/gentype"
- scheme "k8s.io/client-go/kubernetes/scheme"
-)
-
-// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface.
-// A group's client should implement this interface.
-type PodSchedulingContextsGetter interface {
- PodSchedulingContexts(namespace string) PodSchedulingContextInterface
-}
-
-// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources.
-type PodSchedulingContextInterface interface {
- Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error)
- Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error)
- // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error)
- Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error)
- Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error)
- Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error)
- // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error)
- PodSchedulingContextExpansion
-}
-
-// podSchedulingContexts implements PodSchedulingContextInterface
-type podSchedulingContexts struct {
- *gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]
-}
-
-// newPodSchedulingContexts returns a PodSchedulingContexts
-func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts {
- return &podSchedulingContexts{
- gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration](
- "podschedulingcontexts",
- c.RESTClient(),
- scheme.ParameterCodec,
- namespace,
- func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} },
- func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }),
- }
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go
index 879f0990d..acc9b97c2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go
@@ -19,17 +19,16 @@ limitations under the License.
package v1alpha3
import (
- "net/http"
+ http "net/http"
- v1alpha3 "k8s.io/api/resource/v1alpha3"
- "k8s.io/client-go/kubernetes/scheme"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
type ResourceV1alpha3Interface interface {
RESTClient() rest.Interface
DeviceClassesGetter
- PodSchedulingContextsGetter
ResourceClaimsGetter
ResourceClaimTemplatesGetter
ResourceSlicesGetter
@@ -44,10 +43,6 @@ func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface {
return newDeviceClasses(c)
}
-func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface {
- return newPodSchedulingContexts(c, namespace)
-}
-
func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface {
return newResourceClaims(c, namespace)
}
@@ -105,10 +100,10 @@ func New(c rest.Interface) *ResourceV1alpha3Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha3.SchemeGroupVersion
+ gv := resourcev1alpha3.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
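With PodSchedulingContexts dropped from ResourceV1alpha3Interface, the remaining getters are wired as before, and setConfigDefaults now builds the negotiated serializer via rest.CodecFactoryForGeneratedClient rather than scheme.Codecs directly. A minimal sketch of constructing this typed resource/v1alpha3 group client on its own (the kubeconfig location uses the default home path; error handling kept trivial):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        resourcev1alpha3client "k8s.io/client-go/kubernetes/typed/resource/v1alpha3"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Uses the default ~/.kube/config; purely illustrative.
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        // NewForConfig calls setConfigDefaults, shown above, before
        // creating the REST client for the resource.k8s.io/v1alpha3 group.
        rc, err := resourcev1alpha3client.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        classes, err := rc.DeviceClasses().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("device classes:", len(classes.Items))
    }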
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
index 2ac65c005..a95ac56d3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha3
import (
- "context"
+ context "context"
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
+ applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type ResourceClaimsGetter interface {
// ResourceClaimInterface has methods to work with ResourceClaim resources.
type ResourceClaimInterface interface {
- Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error)
- Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error)
+ Create(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaim, error)
+ Update(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error)
+ UpdateStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaim, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error)
- Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaim, err error)
+ Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error)
+ ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error)
ResourceClaimExpansion
}
// resourceClaims implements ResourceClaimInterface
type resourceClaims struct {
- *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]
+ *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration]
}
// newResourceClaims returns a ResourceClaims
func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims {
return &resourceClaims{
- gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration](
+ gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration](
"resourceclaims",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} },
- func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }),
+ func() *resourcev1alpha3.ResourceClaim { return &resourcev1alpha3.ResourceClaim{} },
+ func() *resourcev1alpha3.ResourceClaimList { return &resourcev1alpha3.ResourceClaimList{} },
+ gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaim](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
index 87997bfee..a8ba1f696 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha3
import (
- "context"
+ context "context"
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
+ applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,36 @@ type ResourceClaimTemplatesGetter interface {
// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources.
type ResourceClaimTemplateInterface interface {
- Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error)
- Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error)
+ Create(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error)
+ Update(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaimTemplate, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimTemplateList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error)
- Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaimTemplate, err error)
+ Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaimTemplate, err error)
ResourceClaimTemplateExpansion
}
// resourceClaimTemplates implements ResourceClaimTemplateInterface
type resourceClaimTemplates struct {
- *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]
+ *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration]
}
// newResourceClaimTemplates returns a ResourceClaimTemplates
func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates {
return &resourceClaimTemplates{
- gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration](
+ gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration](
"resourceclaimtemplates",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} },
- func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }),
+ func() *resourcev1alpha3.ResourceClaimTemplate { return &resourcev1alpha3.ResourceClaimTemplate{} },
+ func() *resourcev1alpha3.ResourceClaimTemplateList {
+ return &resourcev1alpha3.ResourceClaimTemplateList{}
+ },
+ gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaimTemplate](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
index 081904140..91dfce5ec 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha3
import (
- "context"
+ context "context"
- v1alpha3 "k8s.io/api/resource/v1alpha3"
+ resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
+ applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type ResourceSlicesGetter interface {
// ResourceSliceInterface has methods to work with ResourceSlice resources.
type ResourceSliceInterface interface {
- Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error)
- Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error)
+ Create(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.CreateOptions) (*resourcev1alpha3.ResourceSlice, error)
+ Update(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceSlice, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceSlice, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceSliceList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error)
- Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceSlice, err error)
+ Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceSlice, err error)
ResourceSliceExpansion
}
// resourceSlices implements ResourceSliceInterface
type resourceSlices struct {
- *gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]
+ *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration]
}
// newResourceSlices returns a ResourceSlices
func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices {
return &resourceSlices{
- gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration](
+ gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration](
"resourceslices",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} },
- func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }),
+ func() *resourcev1alpha3.ResourceSlice { return &resourcev1alpha3.ResourceSlice{} },
+ func() *resourcev1alpha3.ResourceSliceList { return &resourcev1alpha3.ResourceSliceList{} },
+ gentype.PrefersProtobuf[*resourcev1alpha3.ResourceSlice](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go
new file mode 100644
index 000000000..e41416d39
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ context "context"
+
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// DeviceClassesGetter has a method to return a DeviceClassInterface.
+// A group's client should implement this interface.
+type DeviceClassesGetter interface {
+ DeviceClasses() DeviceClassInterface
+}
+
+// DeviceClassInterface has methods to work with DeviceClass resources.
+type DeviceClassInterface interface {
+ Create(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.CreateOptions) (*resourcev1beta1.DeviceClass, error)
+ Update(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.UpdateOptions) (*resourcev1beta1.DeviceClass, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.DeviceClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.DeviceClassList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.DeviceClass, err error)
+ Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.DeviceClass, err error)
+ DeviceClassExpansion
+}
+
+// deviceClasses implements DeviceClassInterface
+type deviceClasses struct {
+ *gentype.ClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration]
+}
+
+// newDeviceClasses returns a DeviceClasses
+func newDeviceClasses(c *ResourceV1beta1Client) *deviceClasses {
+ return &deviceClasses{
+ gentype.NewClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration](
+ "deviceclasses",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *resourcev1beta1.DeviceClass { return &resourcev1beta1.DeviceClass{} },
+ func() *resourcev1beta1.DeviceClassList { return &resourcev1beta1.DeviceClassList{} },
+ gentype.PrefersProtobuf[*resourcev1beta1.DeviceClass](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go
new file mode 100644
index 000000000..771101956
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go
new file mode 100644
index 000000000..d5fcfc214
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type DeviceClassExpansion interface{}
+
+type ResourceClaimExpansion interface{}
+
+type ResourceClaimTemplateExpansion interface{}
+
+type ResourceSliceExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go
new file mode 100644
index 000000000..c6a3b2836
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go
@@ -0,0 +1,122 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ http "net/http"
+
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type ResourceV1beta1Interface interface {
+ RESTClient() rest.Interface
+ DeviceClassesGetter
+ ResourceClaimsGetter
+ ResourceClaimTemplatesGetter
+ ResourceSlicesGetter
+}
+
+// ResourceV1beta1Client is used to interact with features provided by the resource.k8s.io group.
+type ResourceV1beta1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ResourceV1beta1Client) DeviceClasses() DeviceClassInterface {
+ return newDeviceClasses(c)
+}
+
+func (c *ResourceV1beta1Client) ResourceClaims(namespace string) ResourceClaimInterface {
+ return newResourceClaims(c, namespace)
+}
+
+func (c *ResourceV1beta1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface {
+ return newResourceClaimTemplates(c, namespace)
+}
+
+func (c *ResourceV1beta1Client) ResourceSlices() ResourceSliceInterface {
+ return newResourceSlices(c)
+}
+
+// NewForConfig creates a new ResourceV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ResourceV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ResourceV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ResourceV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ResourceV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ResourceV1beta1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ResourceV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *ResourceV1beta1Client {
+ return &ResourceV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := resourcev1beta1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ResourceV1beta1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
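The new resource/v1beta1 group client mirrors the v1alpha3 one (minus PodSchedulingContexts) and is reachable from the aggregate clientset. A minimal sketch, assuming a cluster that actually serves resource.k8s.io/v1beta1 (DynamicResourceAllocation enabled); the kubeconfig path is illustrative:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Kubeconfig path is a placeholder.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Cluster-scoped list through the new v1beta1 group client.
        slices, err := cs.ResourceV1beta1().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("resource slices:", len(slices.Items))
    }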
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go
new file mode 100644
index 000000000..3172ab5df
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ context "context"
+
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// ResourceClaimsGetter has a method to return a ResourceClaimInterface.
+// A group's client should implement this interface.
+type ResourceClaimsGetter interface {
+ ResourceClaims(namespace string) ResourceClaimInterface
+}
+
+// ResourceClaimInterface has methods to work with ResourceClaim resources.
+type ResourceClaimInterface interface {
+ Create(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaim, error)
+ Update(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaim, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaim, err error)
+ Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error)
+ // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+ ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error)
+ ResourceClaimExpansion
+}
+
+// resourceClaims implements ResourceClaimInterface
+type resourceClaims struct {
+ *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration]
+}
+
+// newResourceClaims returns a ResourceClaims
+func newResourceClaims(c *ResourceV1beta1Client, namespace string) *resourceClaims {
+ return &resourceClaims{
+ gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration](
+ "resourceclaims",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ namespace,
+ func() *resourcev1beta1.ResourceClaim { return &resourcev1beta1.ResourceClaim{} },
+ func() *resourcev1beta1.ResourceClaimList { return &resourcev1beta1.ResourceClaimList{} },
+ gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaim](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go
new file mode 100644
index 000000000..26c6fe829
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ context "context"
+
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface.
+// A group's client should implement this interface.
+type ResourceClaimTemplatesGetter interface {
+ ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface
+}
+
+// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources.
+type ResourceClaimTemplateInterface interface {
+ Create(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaimTemplate, error)
+ Update(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaimTemplate, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaimTemplate, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimTemplateList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaimTemplate, err error)
+ Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaimTemplate, err error)
+ ResourceClaimTemplateExpansion
+}
+
+// resourceClaimTemplates implements ResourceClaimTemplateInterface
+type resourceClaimTemplates struct {
+ *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration]
+}
+
+// newResourceClaimTemplates returns a ResourceClaimTemplates
+func newResourceClaimTemplates(c *ResourceV1beta1Client, namespace string) *resourceClaimTemplates {
+ return &resourceClaimTemplates{
+ gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration](
+ "resourceclaimtemplates",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ namespace,
+ func() *resourcev1beta1.ResourceClaimTemplate { return &resourcev1beta1.ResourceClaimTemplate{} },
+ func() *resourcev1beta1.ResourceClaimTemplateList { return &resourcev1beta1.ResourceClaimTemplateList{} },
+ gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaimTemplate](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go
new file mode 100644
index 000000000..c4e985ea4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ context "context"
+
+ resourcev1beta1 "k8s.io/api/resource/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// ResourceSlicesGetter has a method to return a ResourceSliceInterface.
+// A group's client should implement this interface.
+type ResourceSlicesGetter interface {
+ ResourceSlices() ResourceSliceInterface
+}
+
+// ResourceSliceInterface has methods to work with ResourceSlice resources.
+type ResourceSliceInterface interface {
+ Create(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.CreateOptions) (*resourcev1beta1.ResourceSlice, error)
+ Update(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.UpdateOptions) (*resourcev1beta1.ResourceSlice, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceSlice, error)
+ List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceSliceList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceSlice, err error)
+ Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceSlice, err error)
+ ResourceSliceExpansion
+}
+
+// resourceSlices implements ResourceSliceInterface
+type resourceSlices struct {
+ *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration]
+}
+
+// newResourceSlices returns a ResourceSlices
+func newResourceSlices(c *ResourceV1beta1Client) *resourceSlices {
+ return &resourceSlices{
+ gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration](
+ "resourceslices",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *resourcev1beta1.ResourceSlice { return &resourcev1beta1.ResourceSlice{} },
+ func() *resourcev1beta1.ResourceSliceList { return &resourcev1beta1.ResourceSliceList{} },
+ gentype.PrefersProtobuf[*resourcev1beta1.ResourceSlice](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
index a28ef2fd4..3642b404a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/scheduling/v1"
+ schedulingv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
+ applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type PriorityClassesGetter interface {
// PriorityClassInterface has methods to work with PriorityClass resources.
type PriorityClassInterface interface {
- Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error)
- Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error)
+ Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.CreateOptions) (*schedulingv1.PriorityClass, error)
+ Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.UpdateOptions) (*schedulingv1.PriorityClass, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*schedulingv1.PriorityClass, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1.PriorityClassList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error)
- Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error)
+ Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *schedulingv1.PriorityClass, err error)
PriorityClassExpansion
}
// priorityClasses implements PriorityClassInterface
type priorityClasses struct {
- *gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration]
}
// newPriorityClasses returns a PriorityClasses
func newPriorityClasses(c *SchedulingV1Client) *priorityClasses {
return &priorityClasses{
- gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration](
"priorityclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.PriorityClass { return &v1.PriorityClass{} },
- func() *v1.PriorityClassList { return &v1.PriorityClassList{} }),
+ func() *schedulingv1.PriorityClass { return &schedulingv1.PriorityClass{} },
+ func() *schedulingv1.PriorityClassList { return &schedulingv1.PriorityClassList{} },
+ gentype.PrefersProtobuf[*schedulingv1.PriorityClass](),
+ ),
}
}
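The Apply methods in these interfaces take the typed apply configurations from k8s.io/client-go/applyconfigurations (aliased applyconfigurationsschedulingv1 above). A minimal server-side-apply sketch against the scheduling/v1 client; the kubeconfig path, PriorityClass name, value, and field manager are all illustrative:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        schedulingv1ac "k8s.io/client-go/applyconfigurations/scheduling/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Kubeconfig path is a placeholder.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Server-side apply: declare the desired PriorityClass and let the
        // API server merge it under this field manager.
        pc := schedulingv1ac.PriorityClass("batch-low").
            WithValue(1000).
            WithDescription("example priority class")
        applied, err := cs.SchedulingV1().PriorityClasses().Apply(
            context.TODO(), pc, metav1.ApplyOptions{FieldManager: "example-manager"})
        if err != nil {
            panic(err)
        }
        fmt.Println("applied:", applied.Name, applied.Value)
    }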
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go
index 11fc4b9f3..bbb46a9de 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/scheduling/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ schedulingv1 "k8s.io/api/scheduling/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := schedulingv1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
index 5c78f3de9..e7125f9fc 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
+ applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type PriorityClassesGetter interface {
// PriorityClassInterface has methods to work with PriorityClass resources.
type PriorityClassInterface interface {
- Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error)
- Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error)
+ Create(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.CreateOptions) (*schedulingv1alpha1.PriorityClass, error)
+ Update(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1alpha1.PriorityClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1alpha1.PriorityClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*schedulingv1alpha1.PriorityClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error)
- Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1alpha1.PriorityClass, err error)
+ Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1alpha1.PriorityClass, err error)
PriorityClassExpansion
}
// priorityClasses implements PriorityClassInterface
type priorityClasses struct {
- *gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration]
}
// newPriorityClasses returns a PriorityClasses
func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses {
return &priorityClasses{
- gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration](
"priorityclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} },
- func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }),
+ func() *schedulingv1alpha1.PriorityClass { return &schedulingv1alpha1.PriorityClass{} },
+ func() *schedulingv1alpha1.PriorityClassList { return &schedulingv1alpha1.PriorityClassList{} },
+ gentype.PrefersProtobuf[*schedulingv1alpha1.PriorityClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
index 47fb774a3..056ab855e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/scheduling/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := schedulingv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
index 9fef1d759..dcba291e3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/scheduling/v1beta1"
+ schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1"
+ applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type PriorityClassesGetter interface {
// PriorityClassInterface has methods to work with PriorityClass resources.
type PriorityClassInterface interface {
- Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error)
- Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error)
+ Create(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.CreateOptions) (*schedulingv1beta1.PriorityClass, error)
+ Update(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1beta1.PriorityClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1beta1.PriorityClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*schedulingv1beta1.PriorityClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error)
- Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1beta1.PriorityClass, err error)
+ Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1beta1.PriorityClass, err error)
PriorityClassExpansion
}
// priorityClasses implements PriorityClassInterface
type priorityClasses struct {
- *gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration]
}
// newPriorityClasses returns a PriorityClasses
func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses {
return &priorityClasses{
- gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration](
"priorityclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} },
- func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }),
+ func() *schedulingv1beta1.PriorityClass { return &schedulingv1beta1.PriorityClass{} },
+ func() *schedulingv1beta1.PriorityClassList { return &schedulingv1beta1.PriorityClassList{} },
+ gentype.PrefersProtobuf[*schedulingv1beta1.PriorityClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
index dbaf69414..9e383398e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/scheduling/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := schedulingv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
index 2e14db600..9eb82f9ed 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
+ applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSIDriversGetter interface {
// CSIDriverInterface has methods to work with CSIDriver resources.
type CSIDriverInterface interface {
- Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (*v1.CSIDriver, error)
- Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (*v1.CSIDriver, error)
+ Create(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.CreateOptions) (*storagev1.CSIDriver, error)
+ Update(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.UpdateOptions) (*storagev1.CSIDriver, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIDriver, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIDriverList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIDriver, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIDriverList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error)
- Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIDriver, err error)
+ Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIDriver, err error)
CSIDriverExpansion
}
// cSIDrivers implements CSIDriverInterface
type cSIDrivers struct {
- *gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration]
}
// newCSIDrivers returns a CSIDrivers
func newCSIDrivers(c *StorageV1Client) *cSIDrivers {
return &cSIDrivers{
- gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration](
"csidrivers",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.CSIDriver { return &v1.CSIDriver{} },
- func() *v1.CSIDriverList { return &v1.CSIDriverList{} }),
+ func() *storagev1.CSIDriver { return &storagev1.CSIDriver{} },
+ func() *storagev1.CSIDriverList { return &storagev1.CSIDriverList{} },
+ gentype.PrefersProtobuf[*storagev1.CSIDriver](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
index 6d28d7ed1..a4fe6a0ee 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
+ applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSINodesGetter interface {
// CSINodeInterface has methods to work with CSINode resources.
type CSINodeInterface interface {
- Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error)
- Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error)
+ Create(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.CreateOptions) (*storagev1.CSINode, error)
+ Update(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.UpdateOptions) (*storagev1.CSINode, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSINode, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSINodeList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error)
- Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error)
+ Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSINode, err error)
CSINodeExpansion
}
// cSINodes implements CSINodeInterface
type cSINodes struct {
- *gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration]
}
// newCSINodes returns a CSINodes
func newCSINodes(c *StorageV1Client) *cSINodes {
return &cSINodes{
- gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration](
"csinodes",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.CSINode { return &v1.CSINode{} },
- func() *v1.CSINodeList { return &v1.CSINodeList{} }),
+ func() *storagev1.CSINode { return &storagev1.CSINode{} },
+ func() *storagev1.CSINodeList { return &storagev1.CSINodeList{} },
+ gentype.PrefersProtobuf[*storagev1.CSINode](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
index 8a762b9ff..50a942978 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
+ applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface {
// CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources.
type CSIStorageCapacityInterface interface {
- Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (*v1.CSIStorageCapacity, error)
- Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (*v1.CSIStorageCapacity, error)
+ Create(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.CreateOptions) (*storagev1.CSIStorageCapacity, error)
+ Update(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.UpdateOptions) (*storagev1.CSIStorageCapacity, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIStorageCapacity, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIStorageCapacityList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIStorageCapacity, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIStorageCapacityList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error)
- Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIStorageCapacity, err error)
+ Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIStorageCapacity, err error)
CSIStorageCapacityExpansion
}
// cSIStorageCapacities implements CSIStorageCapacityInterface
type cSIStorageCapacities struct {
- *gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration]
}
// newCSIStorageCapacities returns a CSIStorageCapacities
func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities {
return &cSIStorageCapacities{
- gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration](
"csistoragecapacities",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} },
- func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }),
+ func() *storagev1.CSIStorageCapacity { return &storagev1.CSIStorageCapacity{} },
+ func() *storagev1.CSIStorageCapacityList { return &storagev1.CSIStorageCapacityList{} },
+ gentype.PrefersProtobuf[*storagev1.CSIStorageCapacity](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
index 750fe8b62..70aaff169 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1
import (
- "net/http"
+ http "net/http"
- v1 "k8s.io/api/storage/v1"
- "k8s.io/client-go/kubernetes/scheme"
+ storagev1 "k8s.io/api/storage/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -105,10 +105,10 @@ func New(c rest.Interface) *StorageV1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
+ gv := storagev1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
index d7b6ff68a..f33a351f1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
+ applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type StorageClassesGetter interface {
// StorageClassInterface has methods to work with StorageClass resources.
type StorageClassInterface interface {
- Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error)
- Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error)
+ Create(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.CreateOptions) (*storagev1.StorageClass, error)
+ Update(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.UpdateOptions) (*storagev1.StorageClass, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.StorageClass, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*storagev1.StorageClassList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error)
- Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error)
+ Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.StorageClass, err error)
StorageClassExpansion
}
// storageClasses implements StorageClassInterface
type storageClasses struct {
- *gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration]
}
// newStorageClasses returns a StorageClasses
func newStorageClasses(c *StorageV1Client) *storageClasses {
return &storageClasses{
- gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration](
"storageclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.StorageClass { return &v1.StorageClass{} },
- func() *v1.StorageClassList { return &v1.StorageClassList{} }),
+ func() *storagev1.StorageClass { return &storagev1.StorageClass{} },
+ func() *storagev1.StorageClassList { return &storagev1.StorageClassList{} },
+ gentype.PrefersProtobuf[*storagev1.StorageClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
index 3a0404284..60db4844f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1
import (
- "context"
+ context "context"
- v1 "k8s.io/api/storage/v1"
+ storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1 "k8s.io/client-go/applyconfigurations/storage/v1"
+ applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface {
// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
type VolumeAttachmentInterface interface {
- Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error)
- Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+ Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.CreateOptions) (*storagev1.VolumeAttachment, error)
+ Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error)
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.VolumeAttachment, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*storagev1.VolumeAttachmentList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error)
- Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error)
+ Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error)
+ ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error)
VolumeAttachmentExpansion
}
// volumeAttachments implements VolumeAttachmentInterface
type volumeAttachments struct {
- *gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration]
}
// newVolumeAttachments returns a VolumeAttachments
func newVolumeAttachments(c *StorageV1Client) *volumeAttachments {
return &volumeAttachments{
- gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration](
"volumeattachments",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} },
- func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }),
+ func() *storagev1.VolumeAttachment { return &storagev1.VolumeAttachment{} },
+ func() *storagev1.VolumeAttachmentList { return &storagev1.VolumeAttachmentList{} },
+ gentype.PrefersProtobuf[*storagev1.VolumeAttachment](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
index 6819deff6..63ca27fa4 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
+ applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface {
// CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources.
type CSIStorageCapacityInterface interface {
- Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*v1alpha1.CSIStorageCapacity, error)
- Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1alpha1.CSIStorageCapacity, error)
+ Create(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1alpha1.CSIStorageCapacity, error)
+ Update(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1alpha1.CSIStorageCapacity, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CSIStorageCapacity, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CSIStorageCapacityList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.CSIStorageCapacity, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.CSIStorageCapacityList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error)
- Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.CSIStorageCapacity, err error)
+ Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.CSIStorageCapacity, err error)
CSIStorageCapacityExpansion
}
// cSIStorageCapacities implements CSIStorageCapacityInterface
type cSIStorageCapacities struct {
- *gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration]
}
// newCSIStorageCapacities returns a CSIStorageCapacities
func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities {
return &cSIStorageCapacities{
- gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration](
"csistoragecapacities",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} },
- func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }),
+ func() *storagev1alpha1.CSIStorageCapacity { return &storagev1alpha1.CSIStorageCapacity{} },
+ func() *storagev1alpha1.CSIStorageCapacityList { return &storagev1alpha1.CSIStorageCapacityList{} },
+ gentype.PrefersProtobuf[*storagev1alpha1.CSIStorageCapacity](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
index 63e3fc243..17b680d19 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/storage/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -95,10 +95,10 @@ func New(c rest.Interface) *StorageV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := storagev1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
index 0982d5568..d9c24ab5b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
+ applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface {
// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
type VolumeAttachmentInterface interface {
- Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error)
- Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
+ Create(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttachment, error)
+ Update(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttachment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttachmentList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error)
- Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttachment, err error)
+ Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error)
+ ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error)
VolumeAttachmentExpansion
}
// volumeAttachments implements VolumeAttachmentInterface
type volumeAttachments struct {
- *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration]
}
// newVolumeAttachments returns a VolumeAttachments
func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments {
return &volumeAttachments{
- gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration](
"volumeattachments",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} },
- func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }),
+ func() *storagev1alpha1.VolumeAttachment { return &storagev1alpha1.VolumeAttachment{} },
+ func() *storagev1alpha1.VolumeAttachmentList { return &storagev1alpha1.VolumeAttachmentList{} },
+ gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttachment](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
index 40cff7588..ef7d6f4ba 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
+ applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type VolumeAttributesClassesGetter interface {
// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources.
type VolumeAttributesClassInterface interface {
- Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error)
- Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error)
+ Create(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttributesClass, error)
+ Update(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttributesClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttributesClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttributesClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error)
- Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttributesClass, err error)
+ Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttributesClass, err error)
VolumeAttributesClassExpansion
}
// volumeAttributesClasses implements VolumeAttributesClassInterface
type volumeAttributesClasses struct {
- *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, *storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration]
}
// newVolumeAttributesClasses returns a VolumeAttributesClasses
func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses {
return &volumeAttributesClasses{
- gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, *storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration](
"volumeattributesclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} },
- func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }),
+ func() *storagev1alpha1.VolumeAttributesClass { return &storagev1alpha1.VolumeAttributesClass{} },
+ func() *storagev1alpha1.VolumeAttributesClassList { return &storagev1alpha1.VolumeAttributesClassList{} },
+ gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttributesClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
index 2748919b4..063fdb8d1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSIDriversGetter interface {
// CSIDriverInterface has methods to work with CSIDriver resources.
type CSIDriverInterface interface {
- Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error)
- Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error)
+ Create(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.CreateOptions) (*storagev1beta1.CSIDriver, error)
+ Update(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.UpdateOptions) (*storagev1beta1.CSIDriver, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIDriver, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIDriverList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error)
- Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIDriver, err error)
+ Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIDriver, err error)
CSIDriverExpansion
}
// cSIDrivers implements CSIDriverInterface
type cSIDrivers struct {
- *gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration]
}
// newCSIDrivers returns a CSIDrivers
func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers {
return &cSIDrivers{
- gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration](
"csidrivers",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} },
- func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }),
+ func() *storagev1beta1.CSIDriver { return &storagev1beta1.CSIDriver{} },
+ func() *storagev1beta1.CSIDriverList { return &storagev1beta1.CSIDriverList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.CSIDriver](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
index fe6fe228e..5e8eb2e37 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSINodesGetter interface {
// CSINodeInterface has methods to work with CSINode resources.
type CSINodeInterface interface {
- Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error)
- Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error)
+ Create(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.CreateOptions) (*storagev1beta1.CSINode, error)
+ Update(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.UpdateOptions) (*storagev1beta1.CSINode, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSINode, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSINodeList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error)
- Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSINode, err error)
+ Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSINode, err error)
CSINodeExpansion
}
// cSINodes implements CSINodeInterface
type cSINodes struct {
- *gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration]
}
// newCSINodes returns a CSINodes
func newCSINodes(c *StorageV1beta1Client) *cSINodes {
return &cSINodes{
- gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration](
"csinodes",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.CSINode { return &v1beta1.CSINode{} },
- func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }),
+ func() *storagev1beta1.CSINode { return &storagev1beta1.CSINode{} },
+ func() *storagev1beta1.CSINodeList { return &storagev1beta1.CSINodeList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.CSINode](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
index e9ffc1df9..d1f5a7029 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type CSIStorageCapacitiesGetter interface {
// CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources.
type CSIStorageCapacityInterface interface {
- Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*v1beta1.CSIStorageCapacity, error)
- Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1beta1.CSIStorageCapacity, error)
+ Create(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1beta1.CSIStorageCapacity, error)
+ Update(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1beta1.CSIStorageCapacity, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIStorageCapacity, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIStorageCapacityList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIStorageCapacity, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIStorageCapacityList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error)
- Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIStorageCapacity, err error)
+ Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIStorageCapacity, err error)
CSIStorageCapacityExpansion
}
// cSIStorageCapacities implements CSIStorageCapacityInterface
type cSIStorageCapacities struct {
- *gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration]
}
// newCSIStorageCapacities returns a CSIStorageCapacities
func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities {
return &cSIStorageCapacities{
- gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration](
"csistoragecapacities",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} },
- func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }),
+ func() *storagev1beta1.CSIStorageCapacity { return &storagev1beta1.CSIStorageCapacity{} },
+ func() *storagev1beta1.CSIStorageCapacityList { return &storagev1beta1.CSIStorageCapacityList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.CSIStorageCapacity](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
index 3d1b59e36..63b1d42a3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1beta1
import (
- "net/http"
+ http "net/http"
- v1beta1 "k8s.io/api/storage/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -110,10 +110,10 @@ func New(c rest.Interface) *StorageV1beta1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1beta1.SchemeGroupVersion
+ gv := storagev1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
index fed699cc8..341d5ba82 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type StorageClassesGetter interface {
// StorageClassInterface has methods to work with StorageClass resources.
type StorageClassInterface interface {
- Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error)
- Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error)
+ Create(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.CreateOptions) (*storagev1beta1.StorageClass, error)
+ Update(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.UpdateOptions) (*storagev1beta1.StorageClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.StorageClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.StorageClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error)
- Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.StorageClass, err error)
+ Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.StorageClass, err error)
StorageClassExpansion
}
// storageClasses implements StorageClassInterface
type storageClasses struct {
- *gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration]
}
// newStorageClasses returns a StorageClasses
func newStorageClasses(c *StorageV1beta1Client) *storageClasses {
return &storageClasses{
- gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration](
"storageclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} },
- func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }),
+ func() *storagev1beta1.StorageClass { return &storagev1beta1.StorageClass{} },
+ func() *storagev1beta1.StorageClassList { return &storagev1beta1.StorageClassList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.StorageClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
index 01024ce48..42c1bd7e0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,38 @@ type VolumeAttachmentsGetter interface {
// VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
type VolumeAttachmentInterface interface {
- Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error)
- Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
+ Create(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.CreateOptions) (*storagev1beta1.VolumeAttachment, error)
+ Update(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttachment, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttachmentList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error)
- Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttachment, err error)
+ Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error)
+ ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error)
VolumeAttachmentExpansion
}
// volumeAttachments implements VolumeAttachmentInterface
type volumeAttachments struct {
- *gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration]
}
// newVolumeAttachments returns a VolumeAttachments
func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments {
return &volumeAttachments{
- gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration](
"volumeattachments",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} },
- func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }),
+ func() *storagev1beta1.VolumeAttachment { return &storagev1beta1.VolumeAttachment{} },
+ func() *storagev1beta1.VolumeAttachmentList { return &storagev1beta1.VolumeAttachmentList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.VolumeAttachment](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go
index 47eadcac6..09f9f1178 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1beta1
import (
- "context"
+ context "context"
- v1beta1 "k8s.io/api/storage/v1beta1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
+ applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,32 +38,34 @@ type VolumeAttributesClassesGetter interface {
// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources.
type VolumeAttributesClassInterface interface {
- Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error)
- Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error)
+ Create(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1beta1.VolumeAttributesClass, error)
+ Update(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttributesClass, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttributesClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttributesClassList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error)
- Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttributesClass, err error)
+ Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttributesClass, err error)
VolumeAttributesClassExpansion
}
// volumeAttributesClasses implements VolumeAttributesClassInterface
type volumeAttributesClasses struct {
- *gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration]
}
// newVolumeAttributesClasses returns a VolumeAttributesClasses
func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses {
return &volumeAttributesClasses{
- gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration](
"volumeattributesclasses",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} },
- func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }),
+ func() *storagev1beta1.VolumeAttributesClass { return &storagev1beta1.VolumeAttributesClass{} },
+ func() *storagev1beta1.VolumeAttributesClassList { return &storagev1beta1.VolumeAttributesClassList{} },
+ gentype.PrefersProtobuf[*storagev1beta1.VolumeAttributesClass](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go
index 613e45355..dcd5a4bf8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go
@@ -19,10 +19,10 @@ limitations under the License.
package v1alpha1
import (
- "net/http"
+ http "net/http"
- v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
+ storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -85,10 +85,10 @@ func New(c rest.Interface) *StoragemigrationV1alpha1Client {
}
func setConfigDefaults(config *rest.Config) error {
- gv := v1alpha1.SchemeGroupVersion
+ gv := storagemigrationv1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
index 5fc0fd519..5c6981ec8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
@@ -19,13 +19,13 @@ limitations under the License.
package v1alpha1
import (
- "context"
+ context "context"
- v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
+ applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,36 +38,42 @@ type StorageVersionMigrationsGetter interface {
// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources.
type StorageVersionMigrationInterface interface {
- Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error)
- Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
+ Create(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
+ Update(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
+ UpdateStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagemigrationv1alpha1.StorageVersionMigrationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error)
- Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
+ Apply(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error)
+ ApplyStatus(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
StorageVersionMigrationExpansion
}
// storageVersionMigrations implements StorageVersionMigrationInterface
type storageVersionMigrations struct {
- *gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]
}
// newStorageVersionMigrations returns a StorageVersionMigrations
func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations {
return &storageVersionMigrations{
- gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration](
"storageversionmigrations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} },
- func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }),
+ func() *storagemigrationv1alpha1.StorageVersionMigration {
+ return &storagemigrationv1alpha1.StorageVersionMigration{}
+ },
+ func() *storagemigrationv1alpha1.StorageVersionMigrationList {
+ return &storagemigrationv1alpha1.StorageVersionMigrationList{}
+ },
+ gentype.PrefersProtobuf[*storagemigrationv1alpha1.StorageVersionMigration](),
+ ),
}
}
diff --git a/vendor/k8s.io/client-go/openapi/groupversion.go b/vendor/k8s.io/client-go/openapi/groupversion.go
index 601dcbe3c..40d91b9a5 100644
--- a/vendor/k8s.io/client-go/openapi/groupversion.go
+++ b/vendor/k8s.io/client-go/openapi/groupversion.go
@@ -27,6 +27,12 @@ const ContentTypeOpenAPIV3PB = "application/com.github.proto-openapi.spec.v3@v1.
type GroupVersion interface {
Schema(contentType string) ([]byte, error)
+
+ // ServerRelativeURL. Returns the path and parameters used to fetch the schema.
+ // You should use the Schema method to fetch it, but this value can be used
+ // to key the current version of the schema in a cache since it contains a
+ // hash string which changes upon schema update.
+ ServerRelativeURL() string
}
type groupversion struct {
@@ -68,3 +74,9 @@ func (g *groupversion) Schema(contentType string) ([]byte, error) {
return path.Do(context.TODO()).Raw()
}
+
+// URL used for fetching the schema. The URL includes a hash and can be used
+// to key the current version of the schema in a cache.
+func (g *groupversion) ServerRelativeURL() string {
+ return g.item.ServerRelativeURL
+}
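A hedged sketch of how the new ServerRelativeURL accessor can be used: the returned path embeds a hash that changes when the schema changes, so it works as a cache key without downloading the schema body. The in-memory schemaCache below is hypothetical; only the openapi client calls are real API.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/openapi"
	"k8s.io/client-go/rest"
)

// schemaCache is a hypothetical cache keyed by ServerRelativeURL.
var schemaCache = map[string][]byte{}

func cachedSchema(gv openapi.GroupVersion) ([]byte, error) {
	key := gv.ServerRelativeURL() // includes a hash that changes on schema updates
	if doc, ok := schemaCache[key]; ok {
		return doc, nil
	}
	doc, err := gv.Schema(openapi.ContentTypeOpenAPIV3PB)
	if err != nil {
		return nil, err
	}
	schemaCache[key] = doc
	return doc, nil
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	paths, err := clientset.Discovery().OpenAPIV3().Paths()
	if err != nil {
		panic(err)
	}
	for path, gv := range paths {
		doc, err := cachedSchema(gv)
		if err != nil {
			panic(err)
		}
		fmt.Println(path, len(doc))
	}
}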
diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go
index 60df7e568..159caa13f 100644
--- a/vendor/k8s.io/client-go/rest/client.go
+++ b/vendor/k8s.io/client-go/rest/client.go
@@ -17,16 +17,21 @@ limitations under the License.
package rest
import (
+ "fmt"
+ "mime"
"net/http"
"net/url"
"os"
"strconv"
"strings"
+ "sync/atomic"
"time"
+ "github.com/munnerz/goautoneg"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
+ clientfeatures "k8s.io/client-go/features"
"k8s.io/client-go/util/flowcontrol"
)
@@ -85,7 +90,7 @@ type RESTClient struct {
versionedAPIPath string
// content describes how a RESTClient encodes and decodes responses.
- content ClientContentConfig
+ content requestClientContentConfigProvider
// creates BackoffManager that is passed to requests.
createBackoffMgr func() BackoffManager
@@ -105,10 +110,6 @@ type RESTClient struct {
// NewRESTClient creates a new RESTClient. This client performs generic REST functions
// such as Get, Put, Post, and Delete on specified paths.
func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
- if len(config.ContentType) == 0 {
- config.ContentType = "application/json"
- }
-
base := *baseURL
if !strings.HasSuffix(base.Path, "/") {
base.Path += "/"
@@ -119,14 +120,53 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientConte
return &RESTClient{
base: &base,
versionedAPIPath: versionedAPIPath,
- content: config,
+ content: requestClientContentConfigProvider{base: scrubCBORContentConfigIfDisabled(config)},
createBackoffMgr: readExpBackoffConfig,
rateLimiter: rateLimiter,
-
- Client: client,
+ Client: client,
}, nil
}
+func scrubCBORContentConfigIfDisabled(content ClientContentConfig) ClientContentConfig {
+ if clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ content.Negotiator = clientNegotiatorWithCBORSequenceStreamDecoder{content.Negotiator}
+ return content
+ }
+
+ if mediatype, _, err := mime.ParseMediaType(content.ContentType); err == nil && mediatype == "application/cbor" {
+ content.ContentType = "application/json"
+ }
+
+ clauses := goautoneg.ParseAccept(content.AcceptContentTypes)
+ scrubbed := false
+ for i, clause := range clauses {
+ if clause.Type == "application" && clause.SubType == "cbor" {
+ scrubbed = true
+ clauses[i].SubType = "json"
+ }
+ }
+ if !scrubbed {
+ // No application/cbor in AcceptContentTypes, nothing more to do.
+ return content
+ }
+
+ parts := make([]string, 0, len(clauses))
+ for _, clause := range clauses {
+ // ParseAccept does not store the parameter "q" in Params.
+ params := clause.Params
+ if clause.Q < 1 { // omit q=1, it's the default
+ if params == nil {
+ params = make(map[string]string, 1)
+ }
+ params["q"] = strconv.FormatFloat(clause.Q, 'g', 3, 32)
+ }
+ parts = append(parts, mime.FormatMediaType(fmt.Sprintf("%s/%s", clause.Type, clause.SubType), params))
+ }
+ content.AcceptContentTypes = strings.Join(parts, ",")
+
+ return content
+}
+
// GetRateLimiter returns rate limiter for a given client, or nil if it's called on a nil client
func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
if c == nil {
@@ -198,5 +238,106 @@ func (c *RESTClient) Delete() *Request {
// APIVersion returns the APIVersion this RESTClient is expected to use.
func (c *RESTClient) APIVersion() schema.GroupVersion {
- return c.content.GroupVersion
+ config, _ := c.content.GetClientContentConfig()
+ return config.GroupVersion
+}
+
+// requestClientContentConfigProvider observes HTTP 415 (Unsupported Media Type) responses to detect
+// that the server does not understand CBOR. Once this has happened, future requests are forced to
+// use JSON so they can succeed. This is convenient for client users that want to prefer CBOR, but
+// also need to interoperate with older servers so requests do not permanently fail. The clients
+// will not default to using CBOR until at least all supported kube-apiservers have enable-CBOR
+// locked to true, so this path will be rarely taken. Additionally, all generated clients accessing
+// built-in kube resources are forced to protobuf, so those will not degrade to JSON.
+type requestClientContentConfigProvider struct {
+ base ClientContentConfig
+
+ // Becomes permanently true if a server responds with HTTP 415 (Unsupported Media Type) to a
+ // request with "Content-Type" header containing the CBOR media type.
+ sawUnsupportedMediaTypeForCBOR atomic.Bool
+}
+
+// GetClientContentConfig returns the ClientContentConfig that should be used for new requests by
+// this client and true if the request ContentType was selected by default.
+func (p *requestClientContentConfigProvider) GetClientContentConfig() (ClientContentConfig, bool) {
+ config := p.base
+
+ defaulted := config.ContentType == ""
+ if defaulted {
+ config.ContentType = "application/json"
+ }
+
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return config, defaulted
+ }
+
+ if defaulted && clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsPreferCBOR) {
+ config.ContentType = "application/cbor"
+ }
+
+ if sawUnsupportedMediaTypeForCBOR := p.sawUnsupportedMediaTypeForCBOR.Load(); !sawUnsupportedMediaTypeForCBOR {
+ return config, defaulted
+ }
+
+ if mediaType, _, _ := mime.ParseMediaType(config.ContentType); mediaType != runtime.ContentTypeCBOR {
+ return config, defaulted
+ }
+
+ // The effective ContentType is CBOR and the client has previously received an HTTP 415 in
+ // response to a CBOR request. Override ContentType to JSON.
+ config.ContentType = runtime.ContentTypeJSON
+ return config, defaulted
+}
+
+// UnsupportedMediaType reports that the server has responded to a request with HTTP 415 Unsupported
+// Media Type.
+func (p *requestClientContentConfigProvider) UnsupportedMediaType(requestContentType string) {
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return
+ }
+
+ // This could be extended to consider the Content-Encoding request header, the Accept and
+ // Accept-Encoding response headers, the request method, and URI (as mentioned in
+ // https://www.rfc-editor.org/rfc/rfc9110.html#section-15.5.16). The request Content-Type
+ // header is sufficient to implement a blanket CBOR fallback mechanism.
+ requestContentType, _, _ = mime.ParseMediaType(requestContentType)
+ switch requestContentType {
+ case runtime.ContentTypeCBOR, string(types.ApplyCBORPatchType):
+ p.sawUnsupportedMediaTypeForCBOR.Store(true)
+ }
+}
+
+// clientNegotiatorWithCBORSequenceStreamDecoder is a ClientNegotiator that delegates to another
+// ClientNegotiator to select the appropriate Encoder or Decoder for a given media type. As a
+// special case, it will resolve "application/cbor-seq" (a CBOR Sequence, the concatenation of zero
+// or more CBOR data items) as an alias for "application/cbor" (exactly one CBOR data item) when
+// selecting a stream decoder.
+type clientNegotiatorWithCBORSequenceStreamDecoder struct {
+ negotiator runtime.ClientNegotiator
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Encoder(contentType string, params map[string]string) (runtime.Encoder, error) {
+ return n.negotiator.Encoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Decoder(contentType string, params map[string]string) (runtime.Decoder, error) {
+ return n.negotiator.Decoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) StreamDecoder(contentType string, params map[string]string) (runtime.Decoder, runtime.Serializer, runtime.Framer, error) {
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+ return n.negotiator.StreamDecoder(contentType, params)
+ }
+
+ switch contentType {
+ case runtime.ContentTypeCBORSequence:
+ return n.negotiator.StreamDecoder(runtime.ContentTypeCBOR, params)
+ case runtime.ContentTypeCBOR:
+ // This media type is only appropriate for exactly one data item, not the zero or
+ // more events of a watch stream.
+ return nil, nil, nil, runtime.NegotiateError{ContentType: contentType, Stream: true}
+ default:
+ return n.negotiator.StreamDecoder(contentType, params)
+ }
+
}
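Simplified standalone illustration (not the vendored type itself) of the HTTP 415 fallback that requestClientContentConfigProvider introduces: once any CBOR request is rejected with Unsupported Media Type, later requests are pinned to JSON so they can still succeed against older servers. The contentFallback type below is an assumption made for the sketch.

package main

import (
	"fmt"
	"mime"
	"net/http"
	"sync/atomic"
)

type contentFallback struct {
	sawUnsupportedCBOR atomic.Bool
}

// effectiveContentType returns the content type to use for the next request.
func (f *contentFallback) effectiveContentType(preferred string) string {
	if f.sawUnsupportedCBOR.Load() && preferred == "application/cbor" {
		return "application/json"
	}
	return preferred
}

// observe records a response; a 415 to a CBOR request trips the fallback permanently.
func (f *contentFallback) observe(requestContentType string, statusCode int) {
	if statusCode != http.StatusUnsupportedMediaType {
		return
	}
	mediaType, _, _ := mime.ParseMediaType(requestContentType)
	if mediaType == "application/cbor" {
		f.sawUnsupportedCBOR.Store(true)
	}
}

func main() {
	var f contentFallback
	fmt.Println(f.effectiveContentType("application/cbor")) // application/cbor
	f.observe("application/cbor; charset=utf-8", http.StatusUnsupportedMediaType)
	fmt.Println(f.effectiveContentType("application/cbor")) // application/json
}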
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
index f8ff7e928..f2e813d07 100644
--- a/vendor/k8s.io/client-go/rest/config.go
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -32,6 +32,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
+ "k8s.io/client-go/features"
"k8s.io/client-go/pkg/version"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/transport"
@@ -113,6 +116,9 @@ type Config struct {
// QPS indicates the maximum QPS to the master from this client.
// If it's zero, the created RESTClient will use DefaultQPS: 5
+ //
+ // Setting this to a negative value will disable client-side ratelimiting
+ // unless `Ratelimiter` is also set.
QPS float32
// Maximum burst for throttle.
@@ -669,3 +675,19 @@ func CopyConfig(config *Config) *Config {
}
return c
}
+
+// CodecFactoryForGeneratedClient returns the provided CodecFactory if there are no enabled client
+// feature gates affecting serialization. Otherwise, it constructs and returns a new CodecFactory
+// from the provided Scheme.
+//
+// This is supported ONLY for use by clients generated with client-gen. The caller is responsible
+// for ensuring that the CodecFactory argument was constructed using the Scheme argument.
+func CodecFactoryForGeneratedClient(scheme *runtime.Scheme, codecs serializer.CodecFactory) serializer.CodecFactory {
+ if !features.FeatureGates().Enabled(features.ClientsAllowCBOR) {
+ // NOTE: This assumes client-gen will not generate CBOR-enabled Codecs as long as
+ // the feature gate exists.
+ return codecs
+ }
+
+ return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo))
+}
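A small sketch of the behaviour documented by the new QPS comment above: a negative QPS disables client-side rate limiting entirely when no RateLimiter is set, which can be useful when relying on server-side API Priority and Fairness instead. Assumes a kubeconfig at the default location.

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// Negative QPS => no client-side throttling; Burst is then not consulted.
	cfg.QPS = -1

	if _, err := kubernetes.NewForConfig(cfg); err != nil {
		panic(err)
	}
}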
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index f5a9f68ca..0ec90ad18 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -19,6 +19,7 @@ package rest
import (
"bytes"
"context"
+ "encoding/base64"
"encoding/hex"
"fmt"
"io"
@@ -99,6 +100,9 @@ func defaultRequestRetryFn(maxRetries int) WithRetry {
type Request struct {
c *RESTClient
+ contentConfig ClientContentConfig
+ contentTypeNotSet bool
+
warningHandler WarningHandler
rateLimiter flowcontrol.RateLimiter
@@ -123,7 +127,7 @@ type Request struct {
// output
err error
- // only one of body / bodyBytes may be set. requests using body are not retriable.
+ // only one of body / bodyBytes may be set. requests using body are not retryable.
body io.Reader
bodyBytes []byte
@@ -152,6 +156,11 @@ func NewRequest(c *RESTClient) *Request {
timeout = c.Client.Timeout
}
+ // A request needs to know whether the content type was explicitly configured or selected by
+ // default in order to support the per-request Protobuf override used by clients generated
+ // with --prefers-protobuf.
+ contentConfig, contentTypeDefaulted := c.content.GetClientContentConfig()
+
r := &Request{
c: c,
rateLimiter: c.rateLimiter,
@@ -161,14 +170,12 @@ func NewRequest(c *RESTClient) *Request {
maxRetries: 10,
retryFn: defaultRequestRetryFn,
warningHandler: c.warningHandler,
- }
- switch {
- case len(c.content.AcceptContentTypes) > 0:
- r.SetHeader("Accept", c.content.AcceptContentTypes)
- case len(c.content.ContentType) > 0:
- r.SetHeader("Accept", c.content.ContentType+", */*")
+ contentConfig: contentConfig,
+ contentTypeNotSet: contentTypeDefaulted,
}
+
+ r.setAcceptHeader()
return r
}
@@ -177,11 +184,36 @@ func NewRequestWithClient(base *url.URL, versionedAPIPath string, content Client
return NewRequest(&RESTClient{
base: base,
versionedAPIPath: versionedAPIPath,
- content: content,
+ content: requestClientContentConfigProvider{base: content},
Client: client,
})
}
+func (r *Request) UseProtobufAsDefaultIfPreferred(prefersProtobuf bool) *Request {
+ if prefersProtobuf {
+ return r.UseProtobufAsDefault()
+ }
+ return r
+}
+
+func (r *Request) UseProtobufAsDefault() *Request {
+ if r.contentTypeNotSet && len(r.contentConfig.AcceptContentTypes) == 0 {
+ r.contentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+ r.contentConfig.ContentType = "application/vnd.kubernetes.protobuf"
+ r.setAcceptHeader()
+ }
+ return r
+}
+
+func (r *Request) setAcceptHeader() {
+ switch {
+ case len(r.contentConfig.AcceptContentTypes) > 0:
+ r.SetHeader("Accept", r.contentConfig.AcceptContentTypes)
+ case len(r.contentConfig.ContentType) > 0:
+ r.SetHeader("Accept", r.contentConfig.ContentType+", */*")
+ }
+}
+
// Verb sets the verb this request will use.
func (r *Request) Verb(verb string) *Request {
r.verb = verb
@@ -370,7 +402,7 @@ func (r *Request) Param(paramName, s string) *Request {
// VersionedParams will not write query parameters that have omitempty set and are empty. If a
// parameter has already been set it is appended to (Params and VersionedParams are additive).
func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
- return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion)
+ return r.SpecificallyVersionedParams(obj, codec, r.contentConfig.GroupVersion)
}
func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
@@ -450,11 +482,9 @@ func (r *Request) Body(obj interface{}) *Request {
r.err = err
return r
}
- glogBody("Request Body", data)
r.body = nil
r.bodyBytes = data
case []byte:
- glogBody("Request Body", t)
r.body = nil
r.bodyBytes = t
case io.Reader:
@@ -465,7 +495,7 @@ func (r *Request) Body(obj interface{}) *Request {
if reflect.ValueOf(t).IsNil() {
return r
}
- encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil)
+ encoder, err := r.contentConfig.Negotiator.Encoder(r.contentConfig.ContentType, nil)
if err != nil {
r.err = err
return r
@@ -475,10 +505,9 @@ func (r *Request) Body(obj interface{}) *Request {
r.err = err
return r
}
- glogBody("Request Body", data)
r.body = nil
r.bodyBytes = data
- r.SetHeader("Content-Type", r.c.content.ContentType)
+ r.SetHeader("Content-Type", r.contentConfig.ContentType)
default:
r.err = fmt.Errorf("unknown type used for body: %+v", obj)
}
@@ -704,10 +733,19 @@ func (b *throttledLogger) Infof(message string, args ...interface{}) {
// Watch attempts to begin watching the requested location.
// Returns a watch.Interface, or an error.
func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
+ w, _, e := r.watchInternal(ctx)
+ return w, e
+}
+
+func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) {
+ if r.body == nil {
+ logBody(ctx, 2, "Request Body", r.bodyBytes)
+ }
+
// We specifically don't want to rate limit watches, so we
// don't use r.rateLimiter here.
if r.err != nil {
- return nil, r.err
+ return nil, nil, r.err
}
client := r.c.Client
@@ -727,12 +765,12 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
url := r.URL().String()
for {
if err := retry.Before(ctx, r); err != nil {
- return nil, retry.WrapPreviousError(err)
+ return nil, nil, retry.WrapPreviousError(err)
}
req, err := r.newHTTPRequest(ctx)
if err != nil {
- return nil, err
+ return nil, nil, err
}
resp, err := client.Do(req)
@@ -752,21 +790,22 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
// the server must have sent us an error in 'err'
return true, nil
}
- if result := r.transformResponse(resp, req); result.err != nil {
- return true, result.err
+ result := r.transformResponse(ctx, resp, req)
+ if err := result.Error(); err != nil {
+ return true, err
}
return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}()
if done {
if isErrRetryableFunc(req, err) {
- return watch.NewEmptyWatch(), nil
+ return watch.NewEmptyWatch(), nil, nil
}
if err == nil {
// if the server sent us an HTTP Response object,
// we need to return the error object from that.
err = transformErr
}
- return nil, retry.WrapPreviousError(err)
+ return nil, nil, retry.WrapPreviousError(err)
}
}
}
@@ -784,22 +823,35 @@ type WatchListResult struct {
// the end of the stream.
initialEventsEndBookmarkRV string
- // gv represents the API version
- // it is used to construct the final list response
- // normally this information is filled by the server
- gv schema.GroupVersion
+ // negotiatedObjectDecoder knows how to decode
+ // the initialEventsListBlueprint
+ negotiatedObjectDecoder runtime.Decoder
+
+ // base64EncodedInitialEventsListBlueprint contains an empty,
+ // versioned list encoded in the requested format
+ // (e.g., protobuf, JSON, CBOR) and stored as a base64-encoded string
+ base64EncodedInitialEventsListBlueprint string
}
+// Into stores the result into obj. The passed obj parameter must be a pointer to a list type.
+//
+// Note:
+//
+// Special attention should be given to the type *unstructured.Unstructured,
+// which represents a list type but does not have an "Items" field.
+// Users who directly use RESTClient may store the response in such an object.
+// This particular case is not handled by the current implementation of this function,
+// but may be considered for future updates.
func (r WatchListResult) Into(obj runtime.Object) error {
if r.err != nil {
return r.err
}
- listPtr, err := meta.GetItemsPtr(obj)
+ listItemsPtr, err := meta.GetItemsPtr(obj)
if err != nil {
return err
}
- listVal, err := conversion.EnforcePtr(listPtr)
+ listVal, err := conversion.EnforcePtr(listItemsPtr)
if err != nil {
return err
}
@@ -807,6 +859,16 @@ func (r WatchListResult) Into(obj runtime.Object) error {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
+ encodedInitialEventsListBlueprint, err := base64.StdEncoding.DecodeString(r.base64EncodedInitialEventsListBlueprint)
+ if err != nil {
+ return fmt.Errorf("failed to decode the received blueprint list, err %w", err)
+ }
+
+ err = runtime.DecodeInto(r.negotiatedObjectDecoder, encodedInitialEventsListBlueprint, obj)
+ if err != nil {
+ return err
+ }
+
if len(r.items) == 0 {
listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0))
} else {
@@ -824,15 +886,6 @@ func (r WatchListResult) Into(obj runtime.Object) error {
return err
}
listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV)
-
- typeMeta, err := meta.TypeAccessor(obj)
- if err != nil {
- return err
- }
- version := r.gv.String()
- typeMeta.SetAPIVersion(version)
- typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name())
-
return nil
}
@@ -844,6 +897,10 @@ func (r WatchListResult) Into(obj runtime.Object) error {
// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists
// to see what parameters are currently required.
func (r *Request) WatchList(ctx context.Context) WatchListResult {
+ if r.body == nil {
+ logBody(ctx, 2, "Request Body", r.bodyBytes)
+ }
+
if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)}
}
@@ -851,16 +908,16 @@ func (r *Request) WatchList(ctx context.Context) WatchListResult {
// Most users use the generated client, which handles the proper setting of parameters.
// We don't have validation for other methods (e.g., the Watch)
// thus, for symmetry, we haven't added additional checks for the WatchList method.
- w, err := r.Watch(ctx)
+ w, d, err := r.watchInternal(ctx)
if err != nil {
return WatchListResult{err: err}
}
- return r.handleWatchList(ctx, w)
+ return r.handleWatchList(ctx, w, d)
}
// handleWatchList holds the actual logic for easier unit testing.
// Note that this function will close the passed watch.
-func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult {
+func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negotiatedObjectDecoder runtime.Decoder) WatchListResult {
defer w.Stop()
var lastKey string
var items []runtime.Object
@@ -894,10 +951,15 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchL
lastKey = key
case watch.Bookmark:
if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
+ base64EncodedInitialEventsListBlueprint := meta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey]
+ if len(base64EncodedInitialEventsListBlueprint) == 0 {
+ return WatchListResult{err: fmt.Errorf("%q annotation is missing content", metav1.InitialEventsListBlueprintAnnotationKey)}
+ }
return WatchListResult{
- items: items,
- initialEventsEndBookmarkRV: meta.GetResourceVersion(),
- gv: r.c.content.GroupVersion,
+ items: items,
+ initialEventsEndBookmarkRV: meta.GetResourceVersion(),
+ negotiatedObjectDecoder: negotiatedObjectDecoder,
+ base64EncodedInitialEventsListBlueprint: base64EncodedInitialEventsListBlueprint,
}
}
default:
@@ -907,15 +969,15 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchL
}
}
-func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) {
+func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) {
contentType := resp.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err)
}
- objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params)
+ objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params)
if err != nil {
- return nil, err
+ return nil, nil, err
}
handleWarnings(resp.Header, r.warningHandler)
@@ -928,7 +990,7 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error)
// use 500 to indicate that the cause of the error is unknown - other error codes
// are more specific to HTTP interactions, and set a reason
errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),
- ), nil
+ ), objectDecoder, nil
}
// updateRequestResultMetric increments the RequestResult metric counter,
@@ -968,6 +1030,10 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) {
// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
+ if r.body == nil {
+ logBody(ctx, 2, "Request Body", r.bodyBytes)
+ }
+
if r.err != nil {
return nil, r.err
}
@@ -1011,7 +1077,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) {
return false, nil
}
- result := r.transformResponse(resp, req)
+ result := r.transformResponse(ctx, resp, req)
if err := result.Error(); err != nil {
return true, err
}
@@ -1143,7 +1209,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
return false
}
// For connection errors and apiserver shutdown errors retry.
- if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
+ if net.IsConnectionReset(err) || net.IsProbableEOF(err) || net.IsHTTP2ConnectionLost(err) {
return true
}
return false
@@ -1165,6 +1231,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) {
metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength))
}
+ if resp != nil && resp.StatusCode == http.StatusUnsupportedMediaType {
+ r.c.content.UnsupportedMediaType(resp.Request.Header.Get("Content-Type"))
+ }
retry.After(ctx, r, resp, err)
done := func() bool {
@@ -1198,9 +1267,13 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
// - http.Client.Do errors are returned directly.
func (r *Request) Do(ctx context.Context) Result {
+ if r.body == nil {
+ logBody(ctx, 2, "Request Body", r.bodyBytes)
+ }
+
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
- result = r.transformResponse(resp, req)
+ result = r.transformResponse(ctx, resp, req)
})
if err != nil {
return Result{err: err}
@@ -1213,10 +1286,14 @@ func (r *Request) Do(ctx context.Context) Result {
// DoRaw executes the request but does not process the response body.
func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
+ if r.body == nil {
+ logBody(ctx, 2, "Request Body", r.bodyBytes)
+ }
+
var result Result
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
result.body, result.err = io.ReadAll(resp.Body)
- glogBody("Response Body", result.body)
+ logBody(ctx, 2, "Response Body", result.body)
if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
result.err = r.transformUnstructuredResponseError(resp, req, result.body)
}
@@ -1231,7 +1308,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
}
// transformResponse converts an API response into a structured API object
-func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result {
var body []byte
if resp.Body != nil {
data, err := io.ReadAll(resp.Body)
@@ -1260,13 +1337,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
}
}
- glogBody("Response Body", body)
+ // Call depth is tricky. This one is okay for Do and DoRaw.
+ logBody(ctx, 7, "Response Body", body)
// verify the content type is accurate
var decoder runtime.Decoder
contentType := resp.Header.Get("Content-Type")
if len(contentType) == 0 {
- contentType = r.c.content.ContentType
+ contentType = r.contentConfig.ContentType
}
if len(contentType) > 0 {
var err error
@@ -1274,7 +1352,7 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
if err != nil {
return Result{err: errors.NewInternalError(err)}
}
- decoder, err = r.c.content.Negotiator.Decoder(mediaType, params)
+ decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params)
if err != nil {
// if we fail to negotiate a decoder, treat this as an unstructured error
switch {
@@ -1320,14 +1398,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
}
// truncateBody decides if the body should be truncated, based on the glog Verbosity.
-func truncateBody(body string) string {
+func truncateBody(logger klog.Logger, body string) string {
max := 0
switch {
- case bool(klog.V(10).Enabled()):
+ case bool(logger.V(10).Enabled()):
return body
- case bool(klog.V(9).Enabled()):
+ case bool(logger.V(9).Enabled()):
max = 10240
- case bool(klog.V(8).Enabled()):
+ case bool(logger.V(8).Enabled()):
max = 1024
}
@@ -1338,17 +1416,21 @@ func truncateBody(body string) string {
return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
}
-// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// logBody logs a body output that could be either JSON or protobuf. It explicitly guards against
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
-func glogBody(prefix string, body []byte) {
- if klogV := klog.V(8); klogV.Enabled() {
+//
+// It needs to be called by all functions which send or receive the data.
+func logBody(ctx context.Context, callDepth int, prefix string, body []byte) {
+ logger := klog.FromContext(ctx)
+ if loggerV := logger.V(8); loggerV.Enabled() {
+ loggerV := loggerV.WithCallDepth(callDepth)
if bytes.IndexFunc(body, func(r rune) bool {
return r < 0x0a
}) != -1 {
- klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+ loggerV.Info(prefix, "body", truncateBody(logger, hex.Dump(body)))
} else {
- klogV.Infof("%s: %s", prefix, truncateBody(string(body)))
+ loggerV.Info(prefix, "body", truncateBody(logger, string(body)))
}
}
}
@@ -1397,7 +1479,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool,
}
var groupResource schema.GroupResource
if len(r.resource) > 0 {
- groupResource.Group = r.c.content.GroupVersion.Group
+ groupResource.Group = r.contentConfig.GroupVersion.Group
groupResource.Resource = r.resource
}
return errors.NewGenericServerResponse(
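Hedged usage sketch for the reworked WatchList path: with the WatchListClient client-go feature gate enabled (for example via the KUBE_FEATURE_WatchListClient=true environment variable), a streaming LIST can be issued through the REST client and decoded into an ordinary list; the list's type information now comes from the server-provided blueprint annotation rather than being reconstructed client-side. Parameter handling below is done by hand for illustration; generated clients set these options themselves.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	sendInitialEvents := true
	opts := metav1.ListOptions{
		Watch:                true,
		SendInitialEvents:    &sendInitialEvents,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		AllowWatchBookmarks:  true,
	}

	var pods corev1.PodList
	result := clientset.CoreV1().RESTClient().
		Get().
		Namespace("kube-system").
		Resource("pods").
		VersionedParams(&opts, scheme.ParameterCodec).
		WatchList(context.TODO())
	if err := result.Into(&pods); err != nil {
		panic(err)
	}
	fmt.Println("streamed pods:", len(pods.Items))
}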
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
index c4ce6e3b8..0a0ab7917 100644
--- a/vendor/k8s.io/client-go/rest/url_utils.go
+++ b/vendor/k8s.io/client-go/rest/url_utils.go
@@ -61,7 +61,7 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de
return hostURL, versionedAPIPath, nil
}
-// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given
+// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
// API path, following the standard conventions of the Kubernetes API.
func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
versionedAPIPath := path.Join("/", apiPath)
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
index 9e1e04d14..c2b68cbcb 100644
--- a/vendor/k8s.io/client-go/rest/watch/decoder.go
+++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package versioned
+package watch
import (
"fmt"
diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go
index e55aa12d9..a95b4985c 100644
--- a/vendor/k8s.io/client-go/rest/watch/encoder.go
+++ b/vendor/k8s.io/client-go/rest/watch/encoder.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package versioned
+package watch
import (
"encoding/json"
diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go
new file mode 100644
index 000000000..e7af4d6e8
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/actions.go
@@ -0,0 +1,901 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "path"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// All NewRoot... functions return non-namespaced actions, and are equivalent to
+// calling the corresponding New... function with an empty namespace.
+// This is assumed by the fake client generator.
+
+func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl {
+ return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{})
+}
+
+func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Name = name
+ action.GetOptions = opts
+
+ return action
+}
+
+func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl {
+ return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{})
+}
+
+func NewGetActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+ action.GetOptions = opts
+
+ return action
+}
+
+func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl {
+ return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{})
+}
+
+func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Name = name
+ action.GetOptions = opts
+
+ return action
+}
+
+func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl {
+ return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{})
+}
+
+func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+ action.GetOptions = opts
+
+ return action
+}
+
+func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+ action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
+
+ return action
+}
+
+func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ action.ListOptions = opts
+
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+ action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
+
+ return action
+}
+
+func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ action.Namespace = namespace
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+ action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()}
+
+ return action
+}
+
+func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl {
+ action := ListActionImpl{}
+ action.Verb = "list"
+ action.Resource = resource
+ action.Kind = kind
+ action.Namespace = namespace
+ action.ListOptions = opts
+
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(opts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl {
+ return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{})
+}
+
+func NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Object = object
+ action.CreateOptions = opts
+
+ return action
+}
+
+func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl {
+ return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{})
+}
+
+func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Object = object
+ action.CreateOptions = opts
+
+ return action
+}
+
+func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl {
+ return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{})
+}
+
+func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+ action.Object = object
+ action.CreateOptions = opts
+
+ return action
+}
+
+func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl {
+ return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{})
+}
+
+func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl {
+ action := CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Subresource = subresource
+ action.Name = name
+ action.Object = object
+ action.CreateOptions = opts
+
+ return action
+}
+
+func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl {
+ return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{})
+}
+
+func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Object = object
+ action.UpdateOptions = opts
+
+ return action
+}
+
+func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl {
+ return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{})
+}
+
+func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Object = object
+ action.UpdateOptions = opts
+
+ return action
+}
+
+func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl {
+ return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{})
+}
+
+func NewRootPatchActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+ action.PatchOptions = opts
+
+ return action
+}
+
+func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl {
+ return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{})
+}
+
+func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+ action.PatchOptions = opts
+
+ return action
+}
+
+func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
+ return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...)
+}
+
+func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Subresource = path.Join(subresources...)
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+ action.PatchOptions = opts
+
+ return action
+}
+
+func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl {
+ return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...)
+}
+
+func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl {
+ action := PatchActionImpl{}
+ action.Verb = "patch"
+ action.Resource = resource
+ action.Subresource = path.Join(subresources...)
+ action.Namespace = namespace
+ action.Name = name
+ action.PatchType = pt
+ action.Patch = patch
+ action.PatchOptions = opts
+
+ return action
+}
+
+func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl {
+ return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{})
+}
+
+func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Object = object
+ action.UpdateOptions = opts
+
+ return action
+}
+
+func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl {
+ return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{})
+}
+
+func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Object = object
+ action.UpdateOptions = opts
+
+ return action
+}
+
+func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl {
+ return NewRootDeleteActionWithOptions(resource, name, metav1.DeleteOptions{})
+}
+
+func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.DeleteOptions) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Name = name
+ action.DeleteOptions = opts
+
+ return action
+}
+
+func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl {
+ return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{})
+}
+
+func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Name = name
+ action.DeleteOptions = opts
+
+ return action
+}
+
+func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl {
+ return NewDeleteActionWithOptions(resource, namespace, name, metav1.DeleteOptions{})
+}
+
+func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Name = name
+ action.DeleteOptions = opts
+
+ return action
+}
+
+func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl {
+ return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{})
+}
+
+func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl {
+ action := DeleteActionImpl{}
+ action.Verb = "delete"
+ action.Resource = resource
+ action.Subresource = subresource
+ action.Namespace = namespace
+ action.Name = name
+ action.DeleteOptions = opts
+
+ return action
+}
+
+func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl {
+ listOpts, _ := opts.(metav1.ListOptions)
+ return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts)
+}
+
+func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl {
+ action := DeleteCollectionActionImpl{}
+ action.Verb = "delete-collection"
+ action.Resource = resource
+ action.DeleteOptions = deleteOpts
+ action.ListOptions = listOpts
+
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl {
+ listOpts, _ := opts.(metav1.ListOptions)
+ return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts)
+}
+
+func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl {
+ action := DeleteCollectionActionImpl{}
+ action.Verb = "delete-collection"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.DeleteOptions = deleteOpts
+ action.ListOptions = listOpts
+
+ labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts)
+ action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}
+
+ return action
+}
+
+func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl {
+ listOpts, _ := opts.(metav1.ListOptions)
+ return NewRootWatchActionWithOptions(resource, listOpts)
+}
+
+func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl {
+ action := WatchActionImpl{}
+ action.Verb = "watch"
+ action.Resource = resource
+ action.ListOptions = opts
+
+ labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
+ action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
+
+ return action
+}
+
+func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) {
+ var err error
+ switch t := opts.(type) {
+ case metav1.ListOptions:
+ labelSelector, err = labels.Parse(t.LabelSelector)
+ if err != nil {
+ panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err))
+ }
+ fieldSelector, err = fields.ParseSelector(t.FieldSelector)
+ if err != nil {
+ panic(fmt.Errorf("invalid selector %q: %v", t.FieldSelector, err))
+ }
+ resourceVersion = t.ResourceVersion
+ default:
+ panic(fmt.Errorf("expect a ListOptions %T", opts))
+ }
+ if labelSelector == nil {
+ labelSelector = labels.Everything()
+ }
+ if fieldSelector == nil {
+ fieldSelector = fields.Everything()
+ }
+ return labelSelector, fieldSelector, resourceVersion
+}
+
+func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl {
+ listOpts, _ := opts.(metav1.ListOptions)
+ return NewWatchActionWithOptions(resource, namespace, listOpts)
+}
+
+func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl {
+ action := WatchActionImpl{}
+ action.Verb = "watch"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.ListOptions = opts
+
+ labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts)
+ action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion}
+
+ return action
+}
+
+func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl {
+ action := ProxyGetActionImpl{}
+ action.Verb = "get"
+ action.Resource = resource
+ action.Namespace = namespace
+ action.Scheme = scheme
+ action.Name = name
+ action.Port = port
+ action.Path = path
+ action.Params = params
+ return action
+}
+
+type ListRestrictions struct {
+ Labels labels.Selector
+ Fields fields.Selector
+}
+type WatchRestrictions struct {
+ Labels labels.Selector
+ Fields fields.Selector
+ ResourceVersion string
+}
+
+type Action interface {
+ GetNamespace() string
+ GetVerb() string
+ GetResource() schema.GroupVersionResource
+ GetSubresource() string
+ Matches(verb, resource string) bool
+
+ // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this
+ // because the invocation logic deep copies before calls to storage and reactors.
+ DeepCopy() Action
+}
+
+type GenericAction interface {
+ Action
+ GetValue() interface{}
+}
+
+type GetAction interface {
+ Action
+ GetName() string
+}
+
+type ListAction interface {
+ Action
+ GetListRestrictions() ListRestrictions
+}
+
+type CreateAction interface {
+ Action
+ GetObject() runtime.Object
+}
+
+type UpdateAction interface {
+ Action
+ GetObject() runtime.Object
+}
+
+type DeleteAction interface {
+ Action
+ GetName() string
+ GetDeleteOptions() metav1.DeleteOptions
+}
+
+type DeleteCollectionAction interface {
+ Action
+ GetListRestrictions() ListRestrictions
+}
+
+type PatchAction interface {
+ Action
+ GetName() string
+ GetPatchType() types.PatchType
+ GetPatch() []byte
+}
+
+type WatchAction interface {
+ Action
+ GetWatchRestrictions() WatchRestrictions
+}
+
+type ProxyGetAction interface {
+ Action
+ GetScheme() string
+ GetName() string
+ GetPort() string
+ GetPath() string
+ GetParams() map[string]string
+}
+
+type ActionImpl struct {
+ Namespace string
+ Verb string
+ Resource schema.GroupVersionResource
+ Subresource string
+}
+
+func (a ActionImpl) GetNamespace() string {
+ return a.Namespace
+}
+func (a ActionImpl) GetVerb() string {
+ return a.Verb
+}
+func (a ActionImpl) GetResource() schema.GroupVersionResource {
+ return a.Resource
+}
+func (a ActionImpl) GetSubresource() string {
+ return a.Subresource
+}
+func (a ActionImpl) Matches(verb, resource string) bool {
+ // Stay backwards compatible.
+ if !strings.Contains(resource, "/") {
+ return strings.EqualFold(verb, a.Verb) &&
+ strings.EqualFold(resource, a.Resource.Resource)
+ }
+
+ parts := strings.SplitN(resource, "/", 2)
+ topresource, subresource := parts[0], parts[1]
+
+ return strings.EqualFold(verb, a.Verb) &&
+ strings.EqualFold(topresource, a.Resource.Resource) &&
+ strings.EqualFold(subresource, a.Subresource)
+}
+func (a ActionImpl) DeepCopy() Action {
+ ret := a
+ return ret
+}
+
+type GenericActionImpl struct {
+ ActionImpl
+ Value interface{}
+}
+
+func (a GenericActionImpl) GetValue() interface{} {
+ return a.Value
+}
+
+func (a GenericActionImpl) DeepCopy() Action {
+ return GenericActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ // TODO this is wrong, but no worse than before
+ Value: a.Value,
+ }
+}
+
+type GetActionImpl struct {
+ ActionImpl
+ Name string
+ GetOptions metav1.GetOptions
+}
+
+func (a GetActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a GetActionImpl) GetGetOptions() metav1.GetOptions {
+ return a.GetOptions
+}
+
+func (a GetActionImpl) DeepCopy() Action {
+ return GetActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ GetOptions: *a.GetOptions.DeepCopy(),
+ }
+}
+
+type ListActionImpl struct {
+ ActionImpl
+ Kind schema.GroupVersionKind
+ Name string
+ ListRestrictions ListRestrictions
+ ListOptions metav1.ListOptions
+}
+
+func (a ListActionImpl) GetKind() schema.GroupVersionKind {
+ return a.Kind
+}
+
+func (a ListActionImpl) GetListRestrictions() ListRestrictions {
+ return a.ListRestrictions
+}
+
+func (a ListActionImpl) GetListOptions() metav1.ListOptions {
+ return a.ListOptions
+}
+
+func (a ListActionImpl) DeepCopy() Action {
+ return ListActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Kind: a.Kind,
+ Name: a.Name,
+ ListRestrictions: ListRestrictions{
+ Labels: a.ListRestrictions.Labels.DeepCopySelector(),
+ Fields: a.ListRestrictions.Fields.DeepCopySelector(),
+ },
+ ListOptions: *a.ListOptions.DeepCopy(),
+ }
+}
+
+type CreateActionImpl struct {
+ ActionImpl
+ Name string
+ Object runtime.Object
+ CreateOptions metav1.CreateOptions
+}
+
+func (a CreateActionImpl) GetObject() runtime.Object {
+ return a.Object
+}
+
+func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions {
+ return a.CreateOptions
+}
+
+func (a CreateActionImpl) DeepCopy() Action {
+ return CreateActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ Object: a.Object.DeepCopyObject(),
+ CreateOptions: *a.CreateOptions.DeepCopy(),
+ }
+}
+
+type UpdateActionImpl struct {
+ ActionImpl
+ Object runtime.Object
+ UpdateOptions metav1.UpdateOptions
+}
+
+func (a UpdateActionImpl) GetObject() runtime.Object {
+ return a.Object
+}
+
+func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions {
+ return a.UpdateOptions
+}
+
+func (a UpdateActionImpl) DeepCopy() Action {
+ return UpdateActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Object: a.Object.DeepCopyObject(),
+ UpdateOptions: *a.UpdateOptions.DeepCopy(),
+ }
+}
+
+type PatchActionImpl struct {
+ ActionImpl
+ Name string
+ PatchType types.PatchType
+ Patch []byte
+ PatchOptions metav1.PatchOptions
+}
+
+func (a PatchActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a PatchActionImpl) GetPatch() []byte {
+ return a.Patch
+}
+
+func (a PatchActionImpl) GetPatchType() types.PatchType {
+ return a.PatchType
+}
+
+func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions {
+ return a.PatchOptions
+}
+
+func (a PatchActionImpl) DeepCopy() Action {
+ patch := make([]byte, len(a.Patch))
+ copy(patch, a.Patch)
+ return PatchActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ PatchType: a.PatchType,
+ Patch: patch,
+ PatchOptions: *a.PatchOptions.DeepCopy(),
+ }
+}
+
+type DeleteActionImpl struct {
+ ActionImpl
+ Name string
+ DeleteOptions metav1.DeleteOptions
+}
+
+func (a DeleteActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a DeleteActionImpl) GetDeleteOptions() metav1.DeleteOptions {
+ return a.DeleteOptions
+}
+
+func (a DeleteActionImpl) DeepCopy() Action {
+ return DeleteActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Name: a.Name,
+ DeleteOptions: *a.DeleteOptions.DeepCopy(),
+ }
+}
+
+type DeleteCollectionActionImpl struct {
+ ActionImpl
+ ListRestrictions ListRestrictions
+ DeleteOptions metav1.DeleteOptions
+ ListOptions metav1.ListOptions
+}
+
+func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions {
+ return a.ListRestrictions
+}
+
+func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions {
+ return a.DeleteOptions
+}
+
+func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions {
+ return a.ListOptions
+}
+
+func (a DeleteCollectionActionImpl) DeepCopy() Action {
+ return DeleteCollectionActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ ListRestrictions: ListRestrictions{
+ Labels: a.ListRestrictions.Labels.DeepCopySelector(),
+ Fields: a.ListRestrictions.Fields.DeepCopySelector(),
+ },
+ DeleteOptions: *a.DeleteOptions.DeepCopy(),
+ ListOptions: *a.ListOptions.DeepCopy(),
+ }
+}
+
+type WatchActionImpl struct {
+ ActionImpl
+ WatchRestrictions WatchRestrictions
+ ListOptions metav1.ListOptions
+}
+
+func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions {
+ return a.WatchRestrictions
+}
+
+func (a WatchActionImpl) GetListOptions() metav1.ListOptions {
+ return a.ListOptions
+}
+
+func (a WatchActionImpl) DeepCopy() Action {
+ return WatchActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ WatchRestrictions: WatchRestrictions{
+ Labels: a.WatchRestrictions.Labels.DeepCopySelector(),
+ Fields: a.WatchRestrictions.Fields.DeepCopySelector(),
+ ResourceVersion: a.WatchRestrictions.ResourceVersion,
+ },
+ ListOptions: *a.ListOptions.DeepCopy(),
+ }
+}
+
+type ProxyGetActionImpl struct {
+ ActionImpl
+ Scheme string
+ Name string
+ Port string
+ Path string
+ Params map[string]string
+}
+
+func (a ProxyGetActionImpl) GetScheme() string {
+ return a.Scheme
+}
+
+func (a ProxyGetActionImpl) GetName() string {
+ return a.Name
+}
+
+func (a ProxyGetActionImpl) GetPort() string {
+ return a.Port
+}
+
+func (a ProxyGetActionImpl) GetPath() string {
+ return a.Path
+}
+
+func (a ProxyGetActionImpl) GetParams() map[string]string {
+ return a.Params
+}
+
+func (a ProxyGetActionImpl) DeepCopy() Action {
+ params := map[string]string{}
+ for k, v := range a.Params {
+ params[k] = v
+ }
+ return ProxyGetActionImpl{
+ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+ Scheme: a.Scheme,
+ Name: a.Name,
+ Port: a.Port,
+ Path: a.Path,
+ Params: params,
+ }
+}
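
A minimal usage sketch for the action helpers added above (not part of the vendored change; the GroupVersionResource and names are illustrative): tests typically build an expected action with a New...Action constructor and compare it, or use Matches, against the actions recorded by a fake client.

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	clienttesting "k8s.io/client-go/testing"
)

func expectedPodGet() clienttesting.GetActionImpl {
	podsGVR := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	// NewGetAction fills in verb, resource, namespace and name, with empty GetOptions.
	action := clienttesting.NewGetAction(podsGVR, "default", "my-pod")
	// Matches compares verb and resource case-insensitively and also understands
	// the "resource/subresource" form.
	_ = action.Matches("get", "pods") // true for this action
	return action
}
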
diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go
new file mode 100644
index 000000000..3ab9c1b07
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fake.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+// Fake implements client.Interface. Meant to be embedded into a struct to get
+// a default implementation. This makes faking out just the method you want to
+// test easier.
+type Fake struct {
+ sync.RWMutex
+ actions []Action // these may be castable to other types, but "Action" is the minimum
+
+ // ReactionChain is the list of reactors that will be attempted for every
+ // request in the order they are tried.
+ ReactionChain []Reactor
+ // WatchReactionChain is the list of watch reactors that will be attempted
+ // for every request in the order they are tried.
+ WatchReactionChain []WatchReactor
+ // ProxyReactionChain is the list of proxy reactors that will be attempted
+ // for every request in the order they are tried.
+ ProxyReactionChain []ProxyReactor
+
+ Resources []*metav1.APIResourceList
+}
+
+// Reactor is an interface to allow the composition of reaction functions.
+type Reactor interface {
+ // Handles indicates whether or not this Reactor deals with a given
+ // action.
+ Handles(action Action) bool
+ // React handles the action and returns results. It may choose to
+	// delegate by indicating handled=false.
+ React(action Action) (handled bool, ret runtime.Object, err error)
+}
+
+// WatchReactor is an interface to allow the composition of watch functions.
+type WatchReactor interface {
+ // Handles indicates whether or not this Reactor deals with a given
+ // action.
+ Handles(action Action) bool
+ // React handles a watch action and returns results. It may choose to
+ // delegate by indicating handled=false.
+ React(action Action) (handled bool, ret watch.Interface, err error)
+}
+
+// ProxyReactor is an interface to allow the composition of proxy get
+// functions.
+type ProxyReactor interface {
+ // Handles indicates whether or not this Reactor deals with a given
+ // action.
+ Handles(action Action) bool
+	// React handles a proxy action and returns results. It may choose to
+ // delegate by indicating handled=false.
+ React(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+}
+
+// ReactionFunc is a function that returns an object or error for a given
+// Action. If "handled" is false, then the test client will ignore the
+// results and continue to the next ReactionFunc. A ReactionFunc can describe
+// reactions on subresources by testing the result of the action's
+// GetSubresource() method.
+type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error)
+
+// WatchReactionFunc is a function that returns a watch interface. If
+// "handled" is false, then the test client will ignore the results and
+// continue to the next ReactionFunc.
+type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error)
+
+// ProxyReactionFunc is a function that returns a ResponseWrapper interface
+// for a given Action. If "handled" is false, then the test client will
+// ignore the results and continue to the next ProxyReactionFunc.
+type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+
+// AddReactor appends a reactor to the end of the chain.
+func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) {
+ c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction})
+}
+
+// PrependReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) {
+ c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...)
+}
+
+// AddWatchReactor appends a reactor to the end of the chain.
+func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) {
+ c.Lock()
+ defer c.Unlock()
+ c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction})
+}
+
+// PrependWatchReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) {
+ c.Lock()
+ defer c.Unlock()
+ c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...)
+}
+
+// AddProxyReactor appends a reactor to the end of the chain.
+func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) {
+ c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction})
+}
+
+// PrependProxyReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) {
+ c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...)
+}
+
+// Invokes records the provided Action and then invokes the ReactionFunc that
+// handles the action if one exists. defaultReturnObj is expected to be of the
+// same type a normal call would return.
+func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ actionCopy := action.DeepCopy()
+ c.actions = append(c.actions, action.DeepCopy())
+ for _, reactor := range c.ReactionChain {
+ if !reactor.Handles(actionCopy) {
+ continue
+ }
+
+ handled, ret, err := reactor.React(actionCopy)
+ if !handled {
+ continue
+ }
+
+ return ret, err
+ }
+
+ return defaultReturnObj, nil
+}
+
+// InvokesWatch records the provided Action and then invokes the ReactionFunc
+// that handles the action if one exists.
+func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ actionCopy := action.DeepCopy()
+ c.actions = append(c.actions, action.DeepCopy())
+ for _, reactor := range c.WatchReactionChain {
+ if !reactor.Handles(actionCopy) {
+ continue
+ }
+
+ handled, ret, err := reactor.React(actionCopy)
+ if !handled {
+ continue
+ }
+
+ return ret, err
+ }
+
+ return nil, fmt.Errorf("unhandled watch: %#v", action)
+}
+
+// InvokesProxy records the provided Action and then invokes the ReactionFunc
+// that handles the action if one exists.
+func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper {
+ c.Lock()
+ defer c.Unlock()
+
+ actionCopy := action.DeepCopy()
+ c.actions = append(c.actions, action.DeepCopy())
+ for _, reactor := range c.ProxyReactionChain {
+ if !reactor.Handles(actionCopy) {
+ continue
+ }
+
+ handled, ret, err := reactor.React(actionCopy)
+ if !handled || err != nil {
+ continue
+ }
+
+ return ret
+ }
+
+ return nil
+}
+
+// ClearActions clears the history of actions called on the fake client.
+func (c *Fake) ClearActions() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.actions = make([]Action, 0)
+}
+
+// Actions returns a chronologically ordered slice of fake actions called on the
+// fake client.
+func (c *Fake) Actions() []Action {
+ c.RLock()
+ defer c.RUnlock()
+ fa := make([]Action, len(c.actions))
+ copy(fa, c.actions)
+ return fa
+}
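
A minimal sketch of the reactor chain above in use (not part of the vendored change; it assumes the generated fake clientset in k8s.io/client-go/kubernetes/fake is also available): a prepended reactor intercepts matching actions before the default object-tracker reactor and returns a canned error.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func newFailingClient() *fake.Clientset {
	client := fake.NewSimpleClientset()
	// PrependReactor puts this reactor at the head of ReactionChain, so Invokes
	// offers it the action first; returning handled=true stops the chain.
	client.PrependReactor("get", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("injected failure for verb %q", action.GetVerb())
	})
	return client
}
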
diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go
new file mode 100644
index 000000000..15b3e5334
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fixture.go
@@ -0,0 +1,1006 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "reflect"
+ "sigs.k8s.io/structured-merge-diff/v4/typed"
+ "sigs.k8s.io/yaml"
+ "sort"
+ "strings"
+ "sync"
+
+ jsonpatch "gopkg.in/evanphx/json-patch.v4"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/managedfields"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+// ObjectTracker keeps track of objects. It is intended to be used to
+// fake calls to a server by returning objects based on their kind,
+// namespace and name.
+type ObjectTracker interface {
+	// Add adds an object to the tracker. If the object being added
+ // is a list, its items are added separately.
+ Add(obj runtime.Object) error
+
+ // Get retrieves the object by its kind, namespace and name.
+ Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error)
+
+ // Create adds an object to the tracker in the specified namespace.
+ Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error
+
+ // Update updates an existing object in the tracker in the specified namespace.
+ Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error
+
+ // Patch patches an existing object in the tracker in the specified namespace.
+ Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error
+
+ // Apply applies an object in the tracker in the specified namespace.
+ Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error
+
+ // List retrieves all objects of a given kind in the given
+ // namespace. Only non-List kinds are accepted.
+ List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error)
+
+	// Delete deletes an existing object from the tracker. If the object
+ // didn't exist in the tracker prior to deletion, Delete returns
+ // no error.
+ Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error
+
+ // Watch watches objects from the tracker. Watch returns a channel
+	// which will push added / modified / deleted objects.
+ Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error)
+}
+
+// ObjectScheme abstracts the implementation of common operations on objects.
+type ObjectScheme interface {
+ runtime.ObjectCreater
+ runtime.ObjectTyper
+}
+
+// ObjectReaction returns a ReactionFunc that applies core.Action to
+// the given tracker.
+//
+// If tracker also implements ManagedFieldObjectTracker, then managed fields
+// will be handled by the tracker and apply patch actions will be evaluated
+// using the field manager and will take field ownership into consideration.
+// Without a ManagedFieldObjectTracker, apply patch actions do not consider
+// field ownership.
+//
+// WARNING: There is no server side defaulting, validation, or conversion handled
+// by the fake client and subresources are not handled accurately (fields in the
+// root resource are not automatically updated when a scale resource is updated, for example).
+func ObjectReaction(tracker ObjectTracker) ReactionFunc {
+ reactor := objectTrackerReact{tracker: tracker}
+ return func(action Action) (bool, runtime.Object, error) {
+ // Here and below we need to switch on implementation types,
+ // not on interfaces, as some interfaces are identical
+ // (e.g. UpdateAction and CreateAction), so if we use them,
+ // updates and creates end up matching the same case branch.
+ switch action := action.(type) {
+ case ListActionImpl:
+ obj, err := reactor.List(action)
+ return true, obj, err
+ case GetActionImpl:
+ obj, err := reactor.Get(action)
+ return true, obj, err
+ case CreateActionImpl:
+ obj, err := reactor.Create(action)
+ return true, obj, err
+ case UpdateActionImpl:
+ obj, err := reactor.Update(action)
+ return true, obj, err
+ case DeleteActionImpl:
+ obj, err := reactor.Delete(action)
+ return true, obj, err
+ case PatchActionImpl:
+ if action.GetPatchType() == types.ApplyPatchType {
+ obj, err := reactor.Apply(action)
+ return true, obj, err
+ }
+ obj, err := reactor.Patch(action)
+ return true, obj, err
+ default:
+ return false, nil, fmt.Errorf("no reaction implemented for %s", action)
+ }
+ }
+}
+
+type objectTrackerReact struct {
+ tracker ObjectTracker
+}
+
+func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) {
+ return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions)
+}
+
+func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) {
+ return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions)
+}
+
+func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) {
+ ns := action.GetNamespace()
+ gvr := action.GetResource()
+ objMeta, err := meta.Accessor(action.GetObject())
+ if err != nil {
+ return nil, err
+ }
+ if action.GetSubresource() == "" {
+ err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ if getOldObjErr != nil {
+ return nil, getOldObjErr
+ }
+		// If the type of the existing (historical) object matches the type of the object in this operation, perform the update.
+ if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) {
+ // TODO: Currently we're handling subresource creation as an update
+ // on the enclosing resource. This works for some subresources but
+ // might not be generic enough.
+ err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{
+ DryRun: action.CreateOptions.DryRun,
+ FieldManager: action.CreateOptions.FieldManager,
+ FieldValidation: action.CreateOptions.FieldValidation,
+ })
+ } else {
+			// If the historical object type differs from the current object type, return the submitted object without persisting it in the tracker.
+ return action.GetObject(), nil
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ return obj, err
+}
+
+func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) {
+ ns := action.GetNamespace()
+ gvr := action.GetResource()
+ objMeta, err := meta.Accessor(action.GetObject())
+ if err != nil {
+ return nil, err
+ }
+
+ err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ return obj, err
+}
+
+func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) {
+ err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions)
+ return nil, err
+}
+
+func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) {
+ ns := action.GetNamespace()
+ gvr := action.GetResource()
+
+ patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
+ if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil {
+ return nil, err
+ }
+ patchObj.SetName(action.GetName())
+ err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions)
+ if err != nil {
+ return nil, err
+ }
+ obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{})
+ return obj, err
+}
+
+func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) {
+ ns := action.GetNamespace()
+ gvr := action.GetResource()
+
+ obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ old, err := json.Marshal(obj)
+ if err != nil {
+ return nil, err
+ }
+
+ // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
+ // in obj that are removed by patch are cleared
+ value := reflect.ValueOf(obj)
+ value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
+
+ switch action.GetPatchType() {
+ case types.JSONPatchType:
+ patch, err := jsonpatch.DecodePatch(action.GetPatch())
+ if err != nil {
+ return nil, err
+ }
+ modified, err := patch.Apply(old)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = json.Unmarshal(modified, obj); err != nil {
+ return nil, err
+ }
+ case types.MergePatchType:
+ modified, err := jsonpatch.MergePatch(old, action.GetPatch())
+ if err != nil {
+ return nil, err
+ }
+
+ if err := json.Unmarshal(modified, obj); err != nil {
+ return nil, err
+ }
+ case types.StrategicMergePatchType:
+ mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj)
+ if err != nil {
+ return nil, err
+ }
+ if err = json.Unmarshal(mergedByte, obj); err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType())
+ }
+
+ if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil {
+ return nil, err
+ }
+
+ return obj, nil
+}
+
+type tracker struct {
+ scheme ObjectScheme
+ decoder runtime.Decoder
+ lock sync.RWMutex
+ objects map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object
+	// The value type of watchers is a map whose key is either a namespace or the
+	// empty string "" (meaning all namespaces) and whose value is a list of fake watchers.
+ // Manipulations on resources will broadcast the notification events into the
+ // watchers' channel. Note that too many unhandled events (currently 100,
+ // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic.
+ watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher
+}
+
+var _ ObjectTracker = &tracker{}
+
+// NewObjectTracker returns an ObjectTracker that can be used to keep track
+// of objects for the fake clientset. Mostly useful for unit tests.
+func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker {
+ return &tracker{
+ scheme: scheme,
+ decoder: decoder,
+ objects: make(map[schema.GroupVersionResource]map[types.NamespacedName]runtime.Object),
+ watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher),
+ }
+}
+
+func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return nil, err
+ }
+ // Heuristic for list kind: original kind + List suffix. Might
+ // not always be true but this tracker has a pretty limited
+ // understanding of the actual API model.
+ listGVK := gvk
+ listGVK.Kind = listGVK.Kind + "List"
+ // GVK does have the concept of "internal version". The scheme recognizes
+ // the runtime.APIVersionInternal, but not the empty string.
+ if listGVK.Version == "" {
+ listGVK.Version = runtime.APIVersionInternal
+ }
+
+ list, err := t.scheme.New(listGVK)
+ if err != nil {
+ return nil, err
+ }
+
+ if !meta.IsListType(list) {
+ return nil, fmt.Errorf("%q is not a list type", listGVK.Kind)
+ }
+
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ objs, ok := t.objects[gvr]
+ if !ok {
+ return list, nil
+ }
+
+ matchingObjs, err := filterByNamespace(objs, ns)
+ if err != nil {
+ return nil, err
+ }
+ if err := meta.SetList(list, matchingObjs); err != nil {
+ return nil, err
+ }
+ return list.DeepCopyObject(), nil
+}
+
+func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ fakewatcher := watch.NewRaceFreeFake()
+
+ if _, exists := t.watchers[gvr]; !exists {
+ t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher)
+ }
+ t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher)
+ return fakewatcher, nil
+}
+
+func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return nil, err
+ }
+ errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name)
+
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ objs, ok := t.objects[gvr]
+ if !ok {
+ return nil, errNotFound
+ }
+
+ matchingObj, ok := objs[types.NamespacedName{Namespace: ns, Name: name}]
+ if !ok {
+ return nil, errNotFound
+ }
+
+ // Only one object should match in the tracker if it works
+ // correctly, as Add/Update methods enforce kind/namespace/name
+ // uniqueness.
+ obj := matchingObj.DeepCopyObject()
+ if status, ok := obj.(*metav1.Status); ok {
+ if status.Status != metav1.StatusSuccess {
+ return nil, &apierrors.StatusError{ErrStatus: *status}
+ }
+ }
+
+ return obj, nil
+}
+
+func (t *tracker) Add(obj runtime.Object) error {
+ if meta.IsListType(obj) {
+ return t.addList(obj, false)
+ }
+ objMeta, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ gvks, _, err := t.scheme.ObjectKinds(obj)
+ if err != nil {
+ return err
+ }
+
+ if partial, ok := obj.(*metav1.PartialObjectMetadata); ok && len(partial.TypeMeta.APIVersion) > 0 {
+ gvks = []schema.GroupVersionKind{partial.TypeMeta.GroupVersionKind()}
+ }
+
+ if len(gvks) == 0 {
+ return fmt.Errorf("no registered kinds for %v", obj)
+ }
+ for _, gvk := range gvks {
+ // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The
+ // actual registration in apiserver can specify arbitrary route for a
+ // gvk. If a test uses such objects, it cannot preset the tracker with
+ // objects via Add(). Instead, it should trigger the Create() function
+ // of the tracker, where an arbitrary gvr can be specified.
+ gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+ // Resource doesn't have the concept of "__internal" version, just set it to "".
+ if gvr.Version == runtime.APIVersionInternal {
+ gvr.Version = ""
+ }
+
+ err := t.add(gvr, obj, objMeta.GetNamespace(), false)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return err
+ }
+ return t.add(gvr, obj, ns, false)
+}
+
+func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return err
+ }
+ return t.add(gvr, obj, ns, true)
+}
+
+func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return err
+ }
+ return t.add(gvr, patchedObject, ns, true)
+}
+
+func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return err
+ }
+ applyConfigurationMeta, err := meta.Accessor(applyConfiguration)
+ if err != nil {
+ return err
+ }
+
+ obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+
+ old, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+
+ // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
+ // in obj that are removed by patch are cleared
+ value := reflect.ValueOf(obj)
+ value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
+
+	// For backward compatibility with the behavior of 1.30 and earlier, continue to handle apply
+ // via strategic merge patch (clients may use fake.NewClientset and ManagedFieldObjectTracker
+ // for full field manager support).
+ patch, err := json.Marshal(applyConfiguration)
+ if err != nil {
+ return err
+ }
+ mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj)
+ if err != nil {
+ return err
+ }
+ if err = json.Unmarshal(mergedByte, obj); err != nil {
+ return err
+ }
+
+ return t.add(gvr, obj, ns, true)
+}
+
+func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher {
+ watches := []*watch.RaceFreeFakeWatcher{}
+ if t.watchers[gvr] != nil {
+ if w := t.watchers[gvr][ns]; w != nil {
+ watches = append(watches, w...)
+ }
+ if ns != metav1.NamespaceAll {
+ if w := t.watchers[gvr][metav1.NamespaceAll]; w != nil {
+ watches = append(watches, w...)
+ }
+ }
+ }
+ return watches
+}
+
+func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ gr := gvr.GroupResource()
+
+	// To prevent the caller from accidentally modifying the object after
+	// it has been added to the tracker, we always store a deep copy.
+ obj = obj.DeepCopyObject()
+
+ newMeta, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+
+	// Propagate namespace to the new object if it hasn't already been set.
+ if len(newMeta.GetNamespace()) == 0 {
+ newMeta.SetNamespace(ns)
+ }
+
+ if ns != newMeta.GetNamespace() {
+ msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace())
+ return apierrors.NewBadRequest(msg)
+ }
+
+ _, ok := t.objects[gvr]
+ if !ok {
+ t.objects[gvr] = make(map[types.NamespacedName]runtime.Object)
+ }
+
+ namespacedName := types.NamespacedName{Namespace: newMeta.GetNamespace(), Name: newMeta.GetName()}
+ if _, ok = t.objects[gvr][namespacedName]; ok {
+ if replaceExisting {
+ for _, w := range t.getWatches(gvr, ns) {
+				// To prevent the watcher from accidentally modifying the object
+ w.Modify(obj.DeepCopyObject())
+ }
+ t.objects[gvr][namespacedName] = obj
+ return nil
+ }
+ return apierrors.NewAlreadyExists(gr, newMeta.GetName())
+ }
+
+ if replaceExisting {
+ // Tried to update but no matching object was found.
+ return apierrors.NewNotFound(gr, newMeta.GetName())
+ }
+
+ t.objects[gvr][namespacedName] = obj
+
+ for _, w := range t.getWatches(gvr, ns) {
+		// To prevent the watcher from accidentally modifying the object
+ w.Add(obj.DeepCopyObject())
+ }
+
+ return nil
+}
+
+func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error {
+ list, err := meta.ExtractList(obj)
+ if err != nil {
+ return err
+ }
+ errs := runtime.DecodeList(list, t.decoder)
+ if len(errs) > 0 {
+ return errs[0]
+ }
+ for _, obj := range list {
+ if err := t.Add(obj); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error {
+ _, err := assertOptionalSingleArgument(opts)
+ if err != nil {
+ return err
+ }
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ objs, ok := t.objects[gvr]
+ if !ok {
+ return apierrors.NewNotFound(gvr.GroupResource(), name)
+ }
+
+ namespacedName := types.NamespacedName{Namespace: ns, Name: name}
+ obj, ok := objs[namespacedName]
+ if !ok {
+ return apierrors.NewNotFound(gvr.GroupResource(), name)
+ }
+
+ delete(objs, namespacedName)
+ for _, w := range t.getWatches(gvr, ns) {
+ w.Delete(obj.DeepCopyObject())
+ }
+ return nil
+}
+
+type managedFieldObjectTracker struct {
+ ObjectTracker
+ scheme ObjectScheme
+ objectConverter runtime.ObjectConvertor
+ mapper meta.RESTMapper
+ typeConverter managedfields.TypeConverter
+}
+
+var _ ObjectTracker = &managedFieldObjectTracker{}
+
+// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track
+// of objects and managed fields for the fake clientset. Mostly useful for unit tests.
+func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker {
+ return &managedFieldObjectTracker{
+ ObjectTracker: NewObjectTracker(scheme, decoder),
+ scheme: scheme,
+ objectConverter: scheme,
+ mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme),
+ typeConverter: typeConverter,
+ }
+}
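As an illustration of how this constructor is wired up in a test, here is a minimal sketch (not part of this patch; it assumes the standard client-go scheme and the deduced type converter from k8s.io/apimachinery/pkg/util/managedfields are adequate for the types under test):

package example

import (
	"k8s.io/apimachinery/pkg/util/managedfields"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	clienttesting "k8s.io/client-go/testing"
)

// newManagedFieldsTracker builds a tracker whose Create/Update/Patch/Apply
// calls also maintain metadata.managedFields, so server-side apply can be
// exercised against fake clients in unit tests.
func newManagedFieldsTracker() clienttesting.ObjectTracker {
	return clienttesting.NewFieldManagedObjectTracker(
		clientgoscheme.Scheme,
		clientgoscheme.Codecs.UniversalDecoder(),
		managedfields.NewDeducedTypeConverter(),
	)
}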
+
+func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error {
+ opts, err := assertOptionalSingleArgument(vopts)
+ if err != nil {
+ return err
+ }
+ gvk, err := t.mapper.KindFor(gvr)
+ if err != nil {
+ return err
+ }
+ mgr, err := t.fieldManagerFor(gvk)
+ if err != nil {
+ return err
+ }
+
+ objType, err := meta.TypeAccessor(obj)
+ if err != nil {
+ return err
+ }
+ // Stamp GVK
+ apiVersion, kind := gvk.ToAPIVersionAndKind()
+ objType.SetAPIVersion(apiVersion)
+ objType.SetKind(kind)
+
+ objMeta, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ liveObject, err = t.scheme.New(gvk)
+ if err != nil {
+ return err
+ }
+ liveObject.GetObjectKind().SetGroupVersionKind(gvk)
+ } else if err != nil {
+ return err
+ }
+ objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager)
+ if err != nil {
+ return err
+ }
+ return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts)
+}
+
+func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error {
+ opts, err := assertOptionalSingleArgument(vopts)
+ if err != nil {
+ return err
+ }
+ gvk, err := t.mapper.KindFor(gvr)
+ if err != nil {
+ return err
+ }
+ mgr, err := t.fieldManagerFor(gvk)
+ if err != nil {
+ return err
+ }
+
+ objMeta, err := meta.Accessor(obj)
+ if err != nil {
+ return err
+ }
+ oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+ objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager)
+ if err != nil {
+ return err
+ }
+
+ return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts)
+}
+
+func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error {
+ opts, err := assertOptionalSingleArgument(vopts)
+ if err != nil {
+ return err
+ }
+ gvk, err := t.mapper.KindFor(gvr)
+ if err != nil {
+ return err
+ }
+ mgr, err := t.fieldManagerFor(gvk)
+ if err != nil {
+ return err
+ }
+
+ objMeta, err := meta.Accessor(patchedObject)
+ if err != nil {
+ return err
+ }
+ oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+ objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager)
+ if err != nil {
+ return err
+ }
+ return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...)
+}
+
+func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error {
+ opts, err := assertOptionalSingleArgument(vopts)
+ if err != nil {
+ return err
+ }
+ gvk, err := t.mapper.KindFor(gvr)
+ if err != nil {
+ return err
+ }
+ applyConfigurationMeta, err := meta.Accessor(applyConfiguration)
+ if err != nil {
+ return err
+ }
+
+ exists := true
+ liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ exists = false
+ liveObject, err = t.scheme.New(gvk)
+ if err != nil {
+ return err
+ }
+ liveObject.GetObjectKind().SetGroupVersionKind(gvk)
+ } else if err != nil {
+ return err
+ }
+ mgr, err := t.fieldManagerFor(gvk)
+ if err != nil {
+ return err
+ }
+ force := false
+ if opts.Force != nil {
+ force = *opts.Force
+ }
+ objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{
+ DryRun: opts.DryRun,
+ FieldManager: opts.FieldManager,
+ FieldValidation: opts.FieldValidation,
+ })
+ } else {
+ return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{
+ DryRun: opts.DryRun,
+ FieldManager: opts.FieldManager,
+ FieldValidation: opts.FieldValidation,
+ })
+ }
+}
+
+func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) {
+ return managedfields.NewDefaultFieldManager(
+ t.typeConverter,
+ t.objectConverter,
+ &objectDefaulter{},
+ t.scheme,
+ gvk,
+ gvk.GroupVersion(),
+ "",
+ nil)
+}
+
+// objectDefaulter implements runtime.ObjectDefaulter, but it actually
+// does nothing.
+type objectDefaulter struct{}
+
+func (d *objectDefaulter) Default(_ runtime.Object) {}
+
+// filterByNamespace returns all objects in the collection that
+// match the provided namespace. An empty namespace matches
+// every object.
+func filterByNamespace(objs map[types.NamespacedName]runtime.Object, ns string) ([]runtime.Object, error) {
+ var res []runtime.Object
+
+ for _, obj := range objs {
+ acc, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ if ns != "" && acc.GetNamespace() != ns {
+ continue
+ }
+ res = append(res, obj)
+ }
+
+ // Sort res to get deterministic order.
+ sort.Slice(res, func(i, j int) bool {
+ acc1, _ := meta.Accessor(res[i])
+ acc2, _ := meta.Accessor(res[j])
+ if acc1.GetNamespace() != acc2.GetNamespace() {
+ return acc1.GetNamespace() < acc2.GetNamespace()
+ }
+ return acc1.GetName() < acc2.GetName()
+ })
+ return res, nil
+}
+
+func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc {
+ return func(action Action) (bool, watch.Interface, error) {
+ return true, watchInterface, err
+ }
+}
+
+// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value.
+// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions
+type SimpleReactor struct {
+ Verb string
+ Resource string
+
+ Reaction ReactionFunc
+}
+
+func (r *SimpleReactor) Handles(action Action) bool {
+ verbCovers := r.Verb == "*" || r.Verb == action.GetVerb()
+ if !verbCovers {
+ return false
+ }
+
+ return resourceCovers(r.Resource, action)
+}
+
+func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) {
+ return r.Reaction(action)
+}
+
+// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, "pods" matches watch requests on pods and "*" matches watch requests on any resource. This allows for easier composition of reaction functions.
+type SimpleWatchReactor struct {
+ Resource string
+
+ Reaction WatchReactionFunc
+}
+
+func (r *SimpleWatchReactor) Handles(action Action) bool {
+ return resourceCovers(r.Resource, action)
+}
+
+func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) {
+ return r.Reaction(action)
+}
+
+// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, "pods" matches proxy requests on pods and "*" matches proxy requests on any resource. This allows for easier composition of reaction functions.
+type SimpleProxyReactor struct {
+ Resource string
+
+ Reaction ProxyReactionFunc
+}
+
+func (r *SimpleProxyReactor) Handles(action Action) bool {
+ return resourceCovers(r.Resource, action)
+}
+
+func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) {
+ return r.Reaction(action)
+}
+
+func resourceCovers(resource string, action Action) bool {
+ if resource == "*" {
+ return true
+ }
+
+ if resource == action.GetResource().Resource {
+ return true
+ }
+
+ if index := strings.Index(resource, "/"); index != -1 &&
+ resource[:index] == action.GetResource().Resource &&
+ resource[index+1:] == action.GetSubresource() {
+ return true
+ }
+
+ return false
+}
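A small sketch (not part of this patch) of what those matching rules mean when registering a reactor: a bare resource name also covers its subresources, while a "resource/subresource" pattern covers only that subresource.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	clienttesting "k8s.io/client-go/testing"
)

// statusOnlyReactor handles only "update" actions on the "status" subresource
// of pods. Using Resource: "pods" instead would also cover the main resource,
// and "*" would cover every resource.
var statusOnlyReactor = &clienttesting.SimpleReactor{
	Verb:     "update",
	Resource: "pods/status",
	Reaction: func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, nil // handled; short-circuit the default tracker reaction
	},
}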
+
+// assertOptionalSingleArgument returns an error if there is more than one variadic argument.
+// Otherwise, it returns the first variadic argument, or zero value if there are no arguments.
+func assertOptionalSingleArgument[T any](arguments []T) (T, error) {
+ var a T
+ switch len(arguments) {
+ case 0:
+ return a, nil
+ case 1:
+ return arguments[0], nil
+ default:
+ return a, fmt.Errorf("expected only one option argument but got %d", len(arguments))
+ }
+}
+
+type TypeResolver interface {
+ Type(openAPIName string) typed.ParseableType
+}
+
+type TypeConverter struct {
+ Scheme *runtime.Scheme
+ TypeResolver TypeResolver
+}
+
+func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ name, err := tc.openAPIName(gvk)
+ if err != nil {
+ return nil, err
+ }
+ t := tc.TypeResolver.Type(name)
+ switch o := obj.(type) {
+ case *unstructured.Unstructured:
+ return t.FromUnstructured(o.UnstructuredContent(), opts...)
+ default:
+ return t.FromStructured(obj, opts...)
+ }
+}
+
+func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
+ vu := value.AsValue().Unstructured()
+ switch o := vu.(type) {
+ case map[string]interface{}:
+ return &unstructured.Unstructured{Object: o}, nil
+ default:
+ return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
+ }
+}
+
+func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) {
+ example, err := tc.Scheme.New(kind)
+ if err != nil {
+ return "", err
+ }
+ rtype := reflect.TypeOf(example).Elem()
+ name := friendlyName(rtype.PkgPath() + "." + rtype.Name())
+ return name, nil
+}
+
+// This is a copy of openapi.friendlyName.
+// TODO: consider introducing a shared version of this function in apimachinery.
+func friendlyName(name string) string {
+ nameParts := strings.Split(name, "/")
+ // Reverse first part. e.g., io.k8s... instead of k8s.io...
+ if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
+ parts := strings.Split(nameParts[0], ".")
+ for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+ parts[i], parts[j] = parts[j], parts[i]
+ }
+ nameParts[0] = strings.Join(parts, ".")
+ }
+ return strings.Join(nameParts, ".")
+}
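For example, for the Go type k8s.io/api/core/v1.Pod, openAPIName above builds the string "k8s.io/api/core/v1.Pod" from the package path and type name, and friendlyName then reverses only the leading domain segment:

friendlyName("k8s.io/api/core/v1.Pod") // "io.k8s.api.core.v1.Pod"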
diff --git a/vendor/k8s.io/client-go/testing/interface.go b/vendor/k8s.io/client-go/testing/interface.go
new file mode 100644
index 000000000..266c6ba3f
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/interface.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+type FakeClient interface {
+ // Tracker gives access to the ObjectTracker internal to the fake client.
+ Tracker() ObjectTracker
+
+ // AddReactor appends a reactor to the end of the chain.
+ AddReactor(verb, resource string, reaction ReactionFunc)
+
+ // PrependReactor adds a reactor to the beginning of the chain.
+ PrependReactor(verb, resource string, reaction ReactionFunc)
+
+ // AddWatchReactor appends a reactor to the end of the chain.
+ AddWatchReactor(resource string, reaction WatchReactionFunc)
+
+ // PrependWatchReactor adds a reactor to the beginning of the chain.
+ PrependWatchReactor(resource string, reaction WatchReactionFunc)
+
+ // AddProxyReactor appends a reactor to the end of the chain.
+ AddProxyReactor(resource string, reaction ProxyReactionFunc)
+
+ // PrependProxyReactor adds a reactor to the beginning of the chain.
+ PrependProxyReactor(resource string, reaction ProxyReactionFunc)
+
+ // Invokes records the provided Action and then invokes the ReactionFunc that
+ // handles the action if one exists. defaultReturnObj is expected to be of the
+ // same type a normal call would return.
+ Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error)
+
+ // InvokesWatch records the provided Action and then invokes the ReactionFunc
+ // that handles the action if one exists.
+ InvokesWatch(action Action) (watch.Interface, error)
+
+ // InvokesProxy records the provided Action and then invokes the ReactionFunc
+ // that handles the action if one exists.
+ InvokesProxy(action Action) restclient.ResponseWrapper
+
+ // ClearActions clears the history of actions called on the fake client.
+ ClearActions()
+
+	// Actions returns a chronologically ordered slice of fake actions called
+	// on the fake client.
+ Actions() []Action
+}
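The generated fake clientsets satisfy this interface; a minimal usage sketch (not part of this patch) of the reactor chain it describes:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

// newFailingClientset builds a fake clientset whose pod creations always fail,
// by prepending a reactor ahead of the default ObjectTracker reaction.
func newFailingClientset() *fake.Clientset {
	cs := fake.NewSimpleClientset()
	cs.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		// handled=true stops the chain here and returns the injected error
		return true, nil, fmt.Errorf("injected failure for %s", action.GetVerb())
	})
	return cs
}

The actions recorded during a test can then be asserted on via cs.Actions().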
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
index 35bb5dde1..c575652b1 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
@@ -50,7 +50,7 @@ func init() {
Scheme = runtime.NewScheme()
utilruntime.Must(api.AddToScheme(Scheme))
utilruntime.Must(v1.AddToScheme(Scheme))
- yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
+ yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true})
Codec = versioning.NewDefaultingCodecForScheme(
Scheme,
yamlSerializer,
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
index 952f6d7eb..cd0a8649b 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -29,8 +29,6 @@ import (
clientauth "k8s.io/client-go/tools/auth"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
-
- "github.com/imdario/mergo"
)
const (
@@ -241,45 +239,37 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
if err != nil {
return nil, err
}
- mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride)
+ if err := merge(clientConfig, userAuthPartialConfig); err != nil {
+ return nil, err
+ }
- serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
- if err != nil {
+ serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo)
+ if err := merge(clientConfig, serverAuthPartialConfig); err != nil {
return nil, err
}
- mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride)
}
return clientConfig, nil
}
// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for the server identification
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
- mergedConfig := &restclient.Config{}
+// both, so we have to split the objects and merge them separately.
- // configClusterInfo holds the information identify the server provided by .kubeconfig
+// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo
+// (the final result of command line flags and merged .kubeconfig files).
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config {
configClientConfig := &restclient.Config{}
configClientConfig.CAFile = configClusterInfo.CertificateAuthority
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
configClientConfig.ServerName = configClusterInfo.TLSServerName
- mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride)
- return mergedConfig, nil
+ return configClientConfig
}
-// clientauth.Info object contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately
-// we want this order of precedence for user identification
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
-// 4. if there is not enough information to identify the user, prompt if possible
+// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo
+// (the final result of command line flags and merged .kubeconfig files);
+// if the information available there is insufficient, it prompts (if possible) for additional information.
func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
mergedConfig := &restclient.Config{}
@@ -338,8 +328,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
previouslyMergedConfig := mergedConfig
mergedConfig = &restclient.Config{}
- mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride)
- mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride)
+ if err := merge(mergedConfig, promptedConfig); err != nil {
+ return nil, err
+ }
+ if err := merge(mergedConfig, previouslyMergedConfig); err != nil {
+ return nil, err
+ }
config.promptedCredentials.username = mergedConfig.Username
config.promptedCredentials.password = mergedConfig.Password
}
@@ -347,7 +341,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
return mergedConfig, nil
}
-// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information
+// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information
func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
config := &restclient.Config{}
config.Username = info.User
@@ -507,12 +501,16 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
mergedContext := clientcmdapi.NewContext()
if configContext, exists := contexts[contextName]; exists {
- mergo.Merge(mergedContext, configContext, mergo.WithOverride)
+ if err := merge(mergedContext, configContext); err != nil {
+ return clientcmdapi.Context{}, err
+ }
} else if required {
return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
}
if config.overrides != nil {
- mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride)
+ if err := merge(mergedContext, &config.overrides.Context); err != nil {
+ return clientcmdapi.Context{}, err
+ }
}
return *mergedContext, nil
@@ -525,12 +523,16 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
mergedAuthInfo := clientcmdapi.NewAuthInfo()
if configAuthInfo, exists := authInfos[authInfoName]; exists {
- mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride)
+ if err := merge(mergedAuthInfo, configAuthInfo); err != nil {
+ return clientcmdapi.AuthInfo{}, err
+ }
} else if required {
return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
}
if config.overrides != nil {
- mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride)
+ if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
+ return clientcmdapi.AuthInfo{}, err
+ }
}
return *mergedAuthInfo, nil
@@ -543,15 +545,21 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
mergedClusterInfo := clientcmdapi.NewCluster()
if config.overrides != nil {
- mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
}
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
- mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, configClusterInfo); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
} else if required {
return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
}
if config.overrides != nil {
- mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride)
+ if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil {
+ return clientcmdapi.Cluster{}, err
+ }
}
// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
index b75737f1c..c900e5fd1 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -24,7 +24,6 @@ import (
goruntime "runtime"
"strings"
- "github.com/imdario/mergo"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
@@ -248,7 +247,9 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
mapConfig := clientcmdapi.NewConfig()
for _, kubeconfig := range kubeconfigs {
- mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride)
+ if err := merge(mapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// merge all of the struct values in the reverse order so that priority is given correctly
@@ -256,14 +257,20 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
nonMapConfig := clientcmdapi.NewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
- mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride)
+ if err := merge(nonMapConfig, kubeconfig); err != nil {
+ return nil, err
+ }
}
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
// get the values we expect.
config := clientcmdapi.NewConfig()
- mergo.Merge(config, mapConfig, mergo.WithOverride)
- mergo.Merge(config, nonMapConfig, mergo.WithOverride)
+ if err := merge(config, mapConfig); err != nil {
+ return nil, err
+ }
+ if err := merge(config, nonMapConfig); err != nil {
+ return nil, err
+ }
if rules.ResolvePaths() {
if err := ResolveLocalPaths(config); err != nil {
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merge.go b/vendor/k8s.io/client-go/tools/clientcmd/merge.go
new file mode 100644
index 000000000..3d74e6029
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/merge.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// merge recursively merges src into dst:
+// - non-pointer struct fields with any exported fields are recursively merged
+// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero
+// - maps are shallow merged with src keys taking priority over dst
+// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops
+func merge[T any](dst, src *T) error {
+ if dst == nil {
+ return fmt.Errorf("cannot merge into nil pointer")
+ }
+ if src == nil {
+ return nil
+ }
+ return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem())
+}
+
+func mergeValues(fieldNames []string, dst, src reflect.Value) error {
+ dstType := dst.Type()
+ // no-op if we can't read the src
+ if !src.IsValid() {
+ return nil
+ }
+ // sanity check types match
+ if srcType := src.Type(); dstType != srcType {
+ return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, "."))
+ }
+
+ switch dstType.Kind() {
+ case reflect.Struct:
+ if hasExportedField(dstType) {
+ // recursively merge
+ for i, n := 0, dstType.NumField(); i < n; i++ {
+ if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil {
+ return err
+ }
+ }
+ } else if dst.CanSet() {
+ // If all fields are unexported, overwrite with src.
+ // Using src.IsZero() would make more sense but that's not what mergo did.
+ dst.Set(src)
+ }
+
+ case reflect.Map:
+ if dst.CanSet() && !src.IsZero() {
+ // initialize dst if needed
+ if dst.IsZero() {
+ dst.Set(reflect.MakeMap(dstType))
+ }
+ // shallow-merge overwriting dst keys with src keys
+ for _, mapKey := range src.MapKeys() {
+ dst.SetMapIndex(mapKey, src.MapIndex(mapKey))
+ }
+ }
+
+ case reflect.Slice:
+ if dst.CanSet() && src.Len() > 0 {
+ // overwrite dst with non-empty src slice
+ dst.Set(src)
+ }
+
+ case reflect.Pointer:
+ if dst.CanSet() && !src.IsZero() {
+ // overwrite dst with non-zero values for other types
+ if dstType.Elem().Kind() == reflect.Struct {
+ // use struct pointer as-is
+ dst.Set(src)
+ } else {
+ // shallow-copy non-struct pointer (interfaces, primitives, etc)
+ dst.Set(reflect.New(dstType.Elem()))
+ dst.Elem().Set(src.Elem())
+ }
+ }
+
+ default:
+ if dst.CanSet() && !src.IsZero() {
+ // overwrite dst with non-zero values for other types
+ dst.Set(src)
+ }
+ }
+
+ return nil
+}
+
+// hasExportedField returns true if the given type has any exported fields,
+// or if it has any anonymous/embedded struct fields with exported fields
+func hasExportedField(dstType reflect.Type) bool {
+ for i, n := 0, dstType.NumField(); i < n; i++ {
+ field := dstType.Field(i)
+ if field.Anonymous && field.Type.Kind() == reflect.Struct {
+ if hasExportedField(dstType.Field(i).Type) {
+ return true
+ }
+ } else if len(field.PkgPath) == 0 {
+ return true
+ }
+ }
+ return false
+}
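As a concrete reading of these rules: merging a src restclient.Config whose Username is "alice" and whose Burst is 0 into a dst whose Username is empty and whose Burst is 20 yields Username "alice" (non-zero scalars overwrite) and Burst 20 (zero src fields leave dst untouched); for a map field such as Impersonate.Extra, keys from both sides are kept, with src winning where the same key appears on both.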
diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go
index abba06362..170074d4b 100644
--- a/vendor/k8s.io/client-go/tools/record/events_cache.go
+++ b/vendor/k8s.io/client-go/tools/record/events_cache.go
@@ -23,14 +23,13 @@ import (
"sync"
"time"
- "github.com/golang/groupcache/lru"
-
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/utils/clock"
+ "k8s.io/utils/lru"
)
const (
@@ -77,6 +76,7 @@ func getSpamKey(event *v1.Event) string {
event.InvolvedObject.Name,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
+ event.Type,
},
"")
}
@@ -90,8 +90,6 @@ type EventFilterFunc func(event *v1.Event) bool
// EventSourceObjectSpamFilter is responsible for throttling
// the amount of events a source and object can produce.
type EventSourceObjectSpamFilter struct {
- sync.RWMutex
-
// the cache that manages last synced state
cache *lru.Cache
@@ -133,8 +131,6 @@ func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
eventKey := f.spamKeyFunc(event)
// do we have a record of similar events in our cache?
- f.Lock()
- defer f.Unlock()
value, found := f.cache.Get(eventKey)
if found {
record = value.(spamRecord)
diff --git a/vendor/k8s.io/client-go/transport/cache_go118.go b/vendor/k8s.io/client-go/transport/cache_go118.go
index d21d5137d..babdaf8b5 100644
--- a/vendor/k8s.io/client-go/transport/cache_go118.go
+++ b/vendor/k8s.io/client-go/transport/cache_go118.go
@@ -18,7 +18,29 @@ limitations under the License.
package transport
+// this is just to make the "unused" linter rule happy
+var _ = isCacheKeyComparable[tlsCacheKey]
+
// assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime.
-var _ = isComparable[tlsCacheKey]
+//
+// Golang 1.20 introduced an exception to type constraints that allows comparable, but not
+// necessarily strictly comparable type arguments to satisfy the `comparable` type constraint,
+// thus allowing interfaces to fulfil the `comparable` constraint.
+// However, by definition, "A comparison of two interface values with identical
+// dynamic types causes a run-time panic if that type is not comparable".
+//
+// We want to make sure that comparing two `tlsCacheKey` elements won't cause a
+// runtime panic. In order to do that, we'll force the `tlsCacheKey` to be strictly
+// comparable, thus making it impossible for it to contain interfaces.
+// To assert strict comparability, we'll use another definition: "Type
+// parameters are comparable if they are strictly comparable".
+// Below, we first construct a type parameter from the `tlsCacheKey` type so that
+// we can then push this type parameter to a comparable check, thus checking these
+// are strictly comparable.
+//
+// Original suggestion from https://github.com/golang/go/issues/56548#issuecomment-1317673963
+func isCacheKeyComparable[K tlsCacheKey]() {
+ _ = isComparable[K]
+}
func isComparable[T comparable]() {}
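The same trick can be applied to any key type. A minimal sketch with a hypothetical key: if someKey later gained an interface-typed field, the instantiation below would fail to compile instead of risking a runtime panic on comparison.

package example

// someKey is a hypothetical, strictly comparable cache key.
type someKey struct {
	host string
	port int
}

func isStrictlyComparable[T comparable]() {}

// someKey is used as a type parameter constraint; type parameters only satisfy
// `comparable` when they are strictly comparable, so this line is the assertion.
func assertSomeKeyComparable[K someKey]() { _ = isStrictlyComparable[K] }

var _ = assertSomeKeyComparable[someKey]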
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
index e2d1dcc9a..52fefb531 100644
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -86,6 +86,7 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
type authProxyRoundTripper struct {
username string
+ uid string
groups []string
extra map[string][]string
@@ -98,15 +99,17 @@ var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{}
// authentication terminating proxy cases
// assuming you pull the user from the context:
// username is the user.Info.GetName() of the user
+// uid is the user.Info.GetUID() of the user
// groups is the user.Info.GetGroups() of the user
// extra is the user.Info.GetExtra() of the user
// extra can contain any additional information that the authenticator
// thought was interesting, for example authorization scopes.
// In order to faithfully round-trip through an impersonation flow, these keys
// MUST be lowercase.
-func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
+func NewAuthProxyRoundTripper(username, uid string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
return &authProxyRoundTripper{
username: username,
+ uid: uid,
groups: groups,
extra: extra,
rt: rt,
@@ -115,14 +118,15 @@ func NewAuthProxyRoundTripper(username string, groups []string, extra map[string
func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
req = utilnet.CloneRequest(req)
- SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra)
+ SetAuthProxyHeaders(req, rt.username, rt.uid, rt.groups, rt.extra)
return rt.rt.RoundTrip(req)
}
// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument.
-func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) {
+func SetAuthProxyHeaders(req *http.Request, username, uid string, groups []string, extra map[string][]string) {
req.Header.Del("X-Remote-User")
+ req.Header.Del("X-Remote-Uid")
req.Header.Del("X-Remote-Group")
for key := range req.Header {
if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) {
@@ -131,6 +135,9 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex
}
req.Header.Set("X-Remote-User", username)
+ if len(uid) > 0 {
+ req.Header.Set("X-Remote-Uid", uid)
+ }
for _, group := range groups {
req.Header.Add("X-Remote-Group", group)
}
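A minimal sketch (values are illustrative) of the updated constructor, whose new second argument carries the UID that ends up in the X-Remote-Uid header:

package example

import (
	"net/http"

	"k8s.io/client-go/transport"
)

// newProxyingTransport wraps base so outgoing requests carry the auth-proxy headers.
func newProxyingTransport(base http.RoundTripper) http.RoundTripper {
	return transport.NewAuthProxyRoundTripper(
		"jane.doe",                       // X-Remote-User
		"8f6e32d1-hypothetical-uid",      // X-Remote-Uid (only set when non-empty)
		[]string{"system:authenticated"}, // X-Remote-Group
		map[string][]string{"scopes": {"view"}}, // X-Remote-Extra-Scopes
		base,
	)
}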
diff --git a/vendor/k8s.io/client-go/util/apply/apply.go b/vendor/k8s.io/client-go/util/apply/apply.go
new file mode 100644
index 000000000..0cc85df6c
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/apply/apply.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apply
+
+import (
+ "fmt"
+
+ cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/client-go/features"
+ "k8s.io/client-go/rest"
+)
+
+// NewRequest builds a new server-side apply request. The provided apply configuration object will
+// be marshalled to the request's body using the default encoding, and the Content-Type header will
+// be set to application/apply-patch with the appropriate structured syntax name suffix (today,
+// either +yaml or +cbor, see
+// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml).
+func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) {
+ pt := types.ApplyYAMLPatchType
+ marshal := json.Marshal
+
+ if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) {
+ pt = types.ApplyCBORPatchType
+ marshal = cbor.Marshal
+ }
+
+ body, err := marshal(applyConfiguration)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal apply configuration: %w", err)
+ }
+
+ return client.Patch(pt).Body(body), nil
+}
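A minimal usage sketch (names and the raw fieldManager query parameter are illustrative): the helper only selects the patch type and encodes the body, so the caller still scopes the request to a resource.

package example

import (
	"context"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/apply"
)

// applyConfigMap issues a server-side apply for a small ConfigMap.
func applyConfigMap(ctx context.Context, cs kubernetes.Interface, ns string) error {
	ac := corev1ac.ConfigMap("demo", ns).WithData(map[string]string{"k": "v"})
	req, err := apply.NewRequest(cs.CoreV1().RESTClient(), ac)
	if err != nil {
		return err
	}
	return req.
		Namespace(ns).
		Resource("configmaps").
		Name("demo").
		Param("fieldManager", "example-manager").
		Do(ctx).
		Error()
}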
diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
index 7610c05c2..61b8fe28b 100644
--- a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
+++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go
@@ -32,6 +32,12 @@ func init() {
dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR"))
}
+// IsDataConsistencyDetectionForListEnabled returns true when
+// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
+func IsDataConsistencyDetectionForListEnabled() bool {
+ return dataConsistencyDetectionForListFromCacheEnabled
+}
+
// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when
// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup
// for requests that have a high chance of being served from the watch-cache.
@@ -50,7 +56,7 @@ func init() {
// the cache (even though this might not be true for some requests)
// and issue the second call to get data from etcd for comparison.
func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
- if !dataConsistencyDetectionForListFromCacheEnabled {
+ if !IsDataConsistencyDetectionForListEnabled() {
return
}
checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
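The newly exported accessor lets callers outside this package gate their own verification on the same environment-variable switch; a minimal sketch:

package example

import "k8s.io/client-go/util/consistencydetector"

// maybeVerifyList runs verify only when KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR
// was enabled at process startup, mirroring the gate used by the detector itself.
func maybeVerifyList(verify func()) {
	if !consistencydetector.IsDataConsistencyDetectionForListEnabled() {
		return
	}
	verify()
}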
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
index 82e4c4c40..899b8e34e 100644
--- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
+++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -32,7 +32,12 @@ type backoffEntry struct {
type Backoff struct {
sync.RWMutex
- Clock clock.Clock
+ Clock clock.Clock
+	// HasExpiredFunc controls the logic that determines whether the backoff
+	// counter should be reset, and when to GC old backoff entries. If nil, the
+	// default hasExpired function resets the backoff factor once at least
+	// 2*maxDuration has passed since the last update.
+ HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
defaultDuration time.Duration
maxDuration time.Duration
perItemBackoff map[string]*backoffEntry
@@ -93,7 +98,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) {
p.Lock()
defer p.Unlock()
entry, ok := p.perItemBackoff[id]
- if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
entry = p.initEntryUnsafe(id)
entry.backoff += p.jitter(entry.backoff)
} else {
@@ -119,7 +124,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
if !ok {
return false
}
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return p.Clock.Since(eventTime) < entry.backoff
@@ -133,21 +138,21 @@ func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
if !ok {
return false
}
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return eventTime.Sub(entry.lastUpdate) < entry.backoff
}
-// Garbage collect records that have aged past maxDuration. Backoff users are expected
-// to invoke this periodically.
+// Garbage collect records that have aged past their expiration, which defaults
+// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
+// this periodically.
func (p *Backoff) GC() {
p.Lock()
defer p.Unlock()
now := p.Clock.Now()
for id, entry := range p.perItemBackoff {
- if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
- // GC when entry has not been updated for 2*maxDuration
+ if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
delete(p.perItemBackoff, id)
}
}
@@ -174,7 +179,10 @@ func (p *Backoff) jitter(delay time.Duration) time.Duration {
return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
}
-// After 2*maxDuration we restart the backoff factor to the beginning
-func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
+func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+ if p.HasExpiredFunc != nil {
+ return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
+ }
return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
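A minimal sketch of the new hook, shortening the expiry window so entries reset (and are garbage collected) after maxDuration rather than the default 2*maxDuration:

package example

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

// newAggressiveBackoff builds a Backoff whose entries expire after maxDuration.
func newAggressiveBackoff() *flowcontrol.Backoff {
	b := flowcontrol.NewBackOff(time.Second, 30*time.Second)
	b.HasExpiredFunc = func(eventTime, lastUpdate time.Time, maxDuration time.Duration) bool {
		return eventTime.Sub(lastUpdate) > maxDuration
	}
	return b
}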
diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
index 958b96a80..e33a6c692 100644
--- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
@@ -64,26 +64,33 @@ type TypedDelayingQueueConfig[T comparable] struct {
// NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
// NewDelayingQueueWithConfig instead and specify a name.
//
-// Deprecated: use TypedNewDelayingQueue instead.
+// Deprecated: use NewTypedDelayingQueue instead.
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithConfig(DelayingQueueConfig{})
}
-// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability.
-// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
-// TypedNewDelayingQueueWithConfig instead and specify a name.
-func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability.
+// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedDelayingQueueWithConfig instead and specify a name.
+func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] {
return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
}
// NewDelayingQueueWithConfig constructs a new workqueue with options to
// customize different properties.
//
-// Deprecated: use TypedNewDelayingQueueWithConfig instead.
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
return NewTypedDelayingQueueWithConfig[any](config)
}
+// TypedNewDelayingQueue exists for backwards compatibility only.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
+func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+ return NewTypedDelayingQueue[T]()
+}
+
// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
// customize different properties.
func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
@@ -134,7 +141,7 @@ func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T],
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
- waitingForAddCh: make(chan *waitFor, 1000),
+ waitingForAddCh: make(chan *waitFor[T], 1000),
metrics: newRetryMetrics(name, provider),
}
@@ -158,15 +165,15 @@ type delayingType[T comparable] struct {
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
- waitingForAddCh chan *waitFor
+ waitingForAddCh chan *waitFor[T]
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
-type waitFor struct {
- data t
+type waitFor[T any] struct {
+ data T
readyAt time.Time
// index in the priority queue (heap)
index int
@@ -180,15 +187,15 @@ type waitFor struct {
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
-type waitForPriorityQueue []*waitFor
+type waitForPriorityQueue[T any] []*waitFor[T]
-func (pq waitForPriorityQueue) Len() int {
+func (pq waitForPriorityQueue[T]) Len() int {
return len(pq)
}
-func (pq waitForPriorityQueue) Less(i, j int) bool {
+func (pq waitForPriorityQueue[T]) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
-func (pq waitForPriorityQueue) Swap(i, j int) {
+func (pq waitForPriorityQueue[T]) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
@@ -196,16 +203,16 @@ func (pq waitForPriorityQueue) Swap(i, j int) {
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
-func (pq *waitForPriorityQueue) Push(x interface{}) {
+func (pq *waitForPriorityQueue[T]) Push(x interface{}) {
n := len(*pq)
- item := x.(*waitFor)
+ item := x.(*waitFor[T])
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
-func (pq *waitForPriorityQueue) Pop() interface{} {
+func (pq *waitForPriorityQueue[T]) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
@@ -215,7 +222,7 @@ func (pq *waitForPriorityQueue) Pop() interface{} {
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
-func (pq waitForPriorityQueue) Peek() interface{} {
+func (pq waitForPriorityQueue[T]) Peek() interface{} {
return pq[0]
}
@@ -247,7 +254,7 @@ func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
select {
case <-q.stopCh:
// unblock if ShutDown() is called
- case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
+ case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
@@ -266,10 +273,10 @@ func (q *delayingType[T]) waitingLoop() {
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
- waitingForQueue := &waitForPriorityQueue{}
+ waitingForQueue := &waitForPriorityQueue[T]{}
heap.Init(waitingForQueue)
- waitingEntryByData := map[t]*waitFor{}
+ waitingEntryByData := map[T]*waitFor[T]{}
for {
if q.TypedInterface.ShuttingDown() {
@@ -280,13 +287,13 @@ func (q *delayingType[T]) waitingLoop() {
// Add ready entries
for waitingForQueue.Len() > 0 {
- entry := waitingForQueue.Peek().(*waitFor)
+ entry := waitingForQueue.Peek().(*waitFor[T])
if entry.readyAt.After(now) {
break
}
- entry = heap.Pop(waitingForQueue).(*waitFor)
- q.Add(entry.data.(T))
+ entry = heap.Pop(waitingForQueue).(*waitFor[T])
+ q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
@@ -296,7 +303,7 @@ func (q *delayingType[T]) waitingLoop() {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
- entry := waitingForQueue.Peek().(*waitFor)
+ entry := waitingForQueue.Peek().(*waitFor[T])
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
@@ -315,7 +322,7 @@ func (q *delayingType[T]) waitingLoop() {
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
- q.Add(waitEntry.data.(T))
+ q.Add(waitEntry.data)
}
drained := false
@@ -325,7 +332,7 @@ func (q *delayingType[T]) waitingLoop() {
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
- q.Add(waitEntry.data.(T))
+ q.Add(waitEntry.data)
}
default:
drained = true
@@ -336,7 +343,7 @@ func (q *delayingType[T]) waitingLoop() {
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
-func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
+func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
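With the delaying queue now fully generic, callers get typed items back and no longer need interface{} assertions; a minimal sketch:

package example

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// demo enqueues a key with a delay and receives it back as a string.
func demo() {
	q := workqueue.NewTypedDelayingQueue[string]()
	defer q.ShutDown()

	q.AddAfter("node-1", 50*time.Millisecond)

	item, shutdown := q.Get() // blocks until "node-1" becomes ready
	if shutdown {
		return
	}
	fmt.Println(item) // "node-1", already a string
	q.Done(item)
}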
diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go
index f012ccc55..4400cb65e 100644
--- a/vendor/k8s.io/client-go/util/workqueue/metrics.go
+++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go
@@ -26,10 +26,10 @@ import (
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
-type queueMetrics interface {
- add(item t)
- get(item t)
- done(item t)
+type queueMetrics[T comparable] interface {
+ add(item T)
+ get(item T)
+ done(item T)
updateUnfinishedWork()
}
@@ -70,7 +70,7 @@ func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
-type defaultQueueMetrics struct {
+type defaultQueueMetrics[T comparable] struct {
clock clock.Clock
// current depth of a workqueue
@@ -81,15 +81,15 @@ type defaultQueueMetrics struct {
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
- addTimes map[t]time.Time
- processingStartTimes map[t]time.Time
+ addTimes map[T]time.Time
+ processingStartTimes map[T]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
-func (m *defaultQueueMetrics) add(item t) {
+func (m *defaultQueueMetrics[T]) add(item T) {
if m == nil {
return
}
@@ -101,7 +101,7 @@ func (m *defaultQueueMetrics) add(item t) {
}
}
-func (m *defaultQueueMetrics) get(item t) {
+func (m *defaultQueueMetrics[T]) get(item T) {
if m == nil {
return
}
@@ -114,7 +114,7 @@ func (m *defaultQueueMetrics) get(item t) {
}
}
-func (m *defaultQueueMetrics) done(item t) {
+func (m *defaultQueueMetrics[T]) done(item T) {
if m == nil {
return
}
@@ -125,7 +125,7 @@ func (m *defaultQueueMetrics) done(item t) {
}
}
-func (m *defaultQueueMetrics) updateUnfinishedWork() {
+func (m *defaultQueueMetrics[T]) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
@@ -141,15 +141,15 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
m.longestRunningProcessor.Set(oldest)
}
-type noMetrics struct{}
+type noMetrics[T any] struct{}
-func (noMetrics) add(item t) {}
-func (noMetrics) get(item t) {}
-func (noMetrics) done(item t) {}
-func (noMetrics) updateUnfinishedWork() {}
+func (noMetrics[T]) add(item T) {}
+func (noMetrics[T]) get(item T) {}
+func (noMetrics[T]) done(item T) {}
+func (noMetrics[T]) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
-func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
+func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
@@ -210,28 +210,15 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
-var globalMetricsFactory = queueMetricsFactory{
- metricsProvider: noopMetricsProvider{},
-}
-
-type queueMetricsFactory struct {
- metricsProvider MetricsProvider
+var globalMetricsProvider MetricsProvider = noopMetricsProvider{}
- onlyOnce sync.Once
-}
+var setGlobalMetricsProviderOnce sync.Once
-func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
- f.onlyOnce.Do(func() {
- f.metricsProvider = mp
- })
-}
-
-func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
- mp := f.metricsProvider
+func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] {
if len(name) == 0 || mp == (noopMetricsProvider{}) {
- return noMetrics{}
+ return noMetrics[T]{}
}
- return &defaultQueueMetrics{
+ return &defaultQueueMetrics[T]{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
@@ -239,8 +226,8 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
- addTimes: map[t]time.Time{},
- processingStartTimes: map[t]time.Time{},
+ addTimes: map[T]time.Time{},
+ processingStartTimes: map[T]time.Time{},
}
}
@@ -251,7 +238,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
}
if provider == nil {
- provider = globalMetricsFactory.metricsProvider
+ provider = globalMetricsProvider
}
return &defaultRetryMetrics{
@@ -262,5 +249,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
- globalMetricsFactory.setProvider(metricsProvider)
+ setGlobalMetricsProviderOnce.Do(func() {
+ globalMetricsProvider = metricsProvider
+ })
}
diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go
index ff715482c..3cec1768a 100644
--- a/vendor/k8s.io/client-go/util/workqueue/queue.go
+++ b/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -138,13 +138,9 @@ func NewNamed(name string) *Type {
// newQueueWithConfig constructs a new named workqueue
// with the ability to customize different properties for testing purposes
func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
- var metricsFactory *queueMetricsFactory
+ metricsProvider := globalMetricsProvider
if config.MetricsProvider != nil {
- metricsFactory = &queueMetricsFactory{
- metricsProvider: config.MetricsProvider,
- }
- } else {
- metricsFactory = &globalMetricsFactory
+ metricsProvider = config.MetricsProvider
}
if config.Clock == nil {
@@ -158,12 +154,12 @@ func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod t
return newQueue(
config.Clock,
config.Queue,
- metricsFactory.newQueueMetrics(config.Name, config.Clock),
+ newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
updatePeriod,
)
}
-func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] {
+func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
t := &Typed[T]{
clock: c,
queue: queue,
@@ -176,7 +172,7 @@ func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMet
// Don't start the goroutine for a type of noMetrics so we don't consume
// resources unnecessarily
- if _, ok := metrics.(noMetrics); !ok {
+ if _, ok := metrics.(noMetrics[T]); !ok {
go t.updateUnfinishedWorkLoop()
}
@@ -209,14 +205,13 @@ type Typed[t comparable] struct {
shuttingDown bool
drain bool
- metrics queueMetrics
+ metrics queueMetrics[t]
unfinishedWorkUpdatePeriod time.Duration
clock clock.WithTicker
}
type empty struct{}
-type t interface{}
type set[t comparable] map[t]empty
func (s set[t]) has(item t) bool {
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
index 5789e67ab..1b758ab25 100644
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
@@ -22,7 +22,7 @@ import (
"strings"
openapi_v2 "github.com/google/gnostic-models/openapiv2"
- "gopkg.in/yaml.v2"
+ yaml "sigs.k8s.io/yaml/goyaml.v2"
)
func newSchemaError(path *Path, format string, a ...interface{}) error {
diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go
index 79e11deb6..462c40c2c 100644
--- a/vendor/k8s.io/utils/clock/testing/fake_clock.go
+++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go
@@ -48,7 +48,6 @@ type fakeClockWaiter struct {
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
- fired bool
afterFunc func()
}
@@ -198,12 +197,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
- w.fired = true
default:
}
} else {
w.destChan <- t
- w.fired = true
}
if w.afterFunc != nil {
@@ -305,44 +302,48 @@ func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
-// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise.
+// Stop prevents the Timer from firing. It returns true if the call stops the
+// timer, false if the timer has already expired or been stopped.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
+ active := false
newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters))
for i := range f.fakeClock.waiters {
w := f.fakeClock.waiters[i]
if w != &f.waiter {
newWaiters = append(newWaiters, w)
+ continue
}
+ // If timer is found, it has not been fired yet.
+ active = true
}
f.fakeClock.waiters = newWaiters
- return !f.waiter.fired
+ return active
}
-// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet
-// fired, or false otherwise.
+// Reset changes the timer to expire after duration d. It returns true if the
+// timer had been active, false if the timer had expired or been stopped.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
- active := !f.waiter.fired
+ active := false
- f.waiter.fired = false
f.waiter.targetTime = f.fakeClock.time.Add(d)
- var isWaiting bool
for i := range f.fakeClock.waiters {
w := f.fakeClock.waiters[i]
if w == &f.waiter {
- isWaiting = true
+ // If timer is found, it has not been fired yet.
+ active = true
break
}
}
- if !isWaiting {
+ if !active {
f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter)
}
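
The rewritten Stop and Reset above no longer track a fired flag; a timer counts as active exactly while its waiter is still registered with the fake clock, matching time.Timer's documented contract. A short sketch of that contract, assuming the k8s.io/utils/clock/testing package at the revision vendored here:

package main

import (
    "fmt"
    "time"

    testingclock "k8s.io/utils/clock/testing"
)

func main() {
    fc := testingclock.NewFakeClock(time.Now())
    timer := fc.NewTimer(time.Second)

    fmt.Println(timer.Stop()) // true: still registered, so Stop deactivates it

    timer.Reset(time.Second) // re-registers the waiter with the fake clock
    fc.Step(2 * time.Second) // advance past the target time; the timer fires
    <-timer.C()

    fmt.Println(timer.Stop()) // false: the timer already fired and was removed
}
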
diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go
new file mode 100644
index 000000000..fd4db4407
--- /dev/null
+++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package lru implements an LRU cache.
+package golang_lru
+
+import "container/list"
+
+// Cache is an LRU cache. It is not safe for concurrent access.
+type Cache struct {
+ // MaxEntries is the maximum number of cache entries before
+ // an item is evicted. Zero means no limit.
+ MaxEntries int
+
+ // OnEvicted optionally specifies a callback function to be
+ // executed when an entry is purged from the cache.
+ OnEvicted func(key Key, value interface{})
+
+ ll *list.List
+ cache map[interface{}]*list.Element
+}
+
+// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
+type Key interface{}
+
+type entry struct {
+ key Key
+ value interface{}
+}
+
+// New creates a new Cache.
+// If maxEntries is zero, the cache has no limit and it's assumed
+// that eviction is done by the caller.
+func New(maxEntries int) *Cache {
+ return &Cache{
+ MaxEntries: maxEntries,
+ ll: list.New(),
+ cache: make(map[interface{}]*list.Element),
+ }
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+ if c.cache == nil {
+ c.cache = make(map[interface{}]*list.Element)
+ c.ll = list.New()
+ }
+ if ee, ok := c.cache[key]; ok {
+ c.ll.MoveToFront(ee)
+ ee.Value.(*entry).value = value
+ return
+ }
+ ele := c.ll.PushFront(&entry{key, value})
+ c.cache[key] = ele
+ if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
+ c.RemoveOldest()
+ }
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.ll.MoveToFront(ele)
+ return ele.Value.(*entry).value, true
+ }
+ return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.removeElement(ele)
+ }
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ if c.cache == nil {
+ return
+ }
+ ele := c.ll.Back()
+ if ele != nil {
+ c.removeElement(ele)
+ }
+}
+
+func (c *Cache) removeElement(e *list.Element) {
+ c.ll.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.cache, kv.key)
+ if c.OnEvicted != nil {
+ c.OnEvicted(kv.key, kv.value)
+ }
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ if c.cache == nil {
+ return 0
+ }
+ return c.ll.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+ if c.OnEvicted != nil {
+ for _, e := range c.cache {
+ kv := e.Value.(*entry)
+ c.OnEvicted(kv.key, kv.value)
+ }
+ }
+ c.ll = nil
+ c.cache = nil
+}
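
A sketch of the eviction contract this new file defines: MaxEntries bounds the cache and OnEvicted observes every purge. The package lives under internal/, so only code inside the k8s.io/utils module can import it as written below; external callers go through the k8s.io/utils/lru wrapper added in the next file.

package main

import (
    "fmt"

    groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru"
)

func main() {
    c := groupcache.New(2) // at most two entries before the oldest is evicted
    c.OnEvicted = func(key groupcache.Key, value interface{}) {
        fmt.Println("evicted:", key, value)
    }

    c.Add("a", 1)
    c.Add("b", 2)
    c.Get("a")    // touch "a" so it becomes the most recently used entry
    c.Add("c", 3) // exceeds MaxEntries, so the oldest entry ("b") is purged

    _, ok := c.Get("b")
    fmt.Println("b still cached:", ok) // false
    fmt.Println("entries:", c.Len())   // 2
}
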
diff --git a/vendor/k8s.io/utils/lru/lru.go b/vendor/k8s.io/utils/lru/lru.go
new file mode 100644
index 000000000..40c22ece1
--- /dev/null
+++ b/vendor/k8s.io/utils/lru/lru.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package lru
+
+import (
+ "fmt"
+ "sync"
+
+ groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru"
+)
+
+type Key = groupcache.Key
+type EvictionFunc = func(key Key, value interface{})
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+ cache *groupcache.Cache
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size.
+func New(size int) *Cache {
+ return &Cache{
+ cache: groupcache.New(size),
+ }
+}
+
+// NewWithEvictionFunc creates an LRU of the given size with the given eviction func.
+func NewWithEvictionFunc(size int, f EvictionFunc) *Cache {
+ c := New(size)
+ c.cache.OnEvicted = f
+ return c
+}
+
+// SetEvictionFunc updates the eviction func
+func (c *Cache) SetEvictionFunc(f EvictionFunc) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.cache.OnEvicted != nil {
+ return fmt.Errorf("lru cache eviction function is already set")
+ }
+ c.cache.OnEvicted = f
+ return nil
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.Add(key, value)
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.cache.Get(key)
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.Remove(key)
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.RemoveOldest()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.cache.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.cache.Clear()
+}
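
The wrapper above puts a mutex around the forked groupcache cache so it can be shared across goroutines. A minimal usage sketch, assuming k8s.io/utils/lru at the revision vendored here:

package main

import (
    "fmt"
    "sync"

    "k8s.io/utils/lru"
)

func main() {
    cache := lru.NewWithEvictionFunc(128, func(key lru.Key, value interface{}) {
        fmt.Println("evicted:", key)
    })

    // The internal mutex serializes access, so callers need no extra locking.
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            cache.Add(fmt.Sprintf("key-%d", i), i)
        }(i)
    }
    wg.Wait()

    if v, ok := cache.Get("key-0"); ok {
        fmt.Println("key-0 =", v)
    }
    fmt.Println("len =", cache.Len())
}
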
diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go
deleted file mode 100644
index 8e21838f2..000000000
--- a/vendor/k8s.io/utils/strings/slices/slices.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package slices defines various functions useful with slices of string type.
-// The goal is to be as close as possible to
-// https://github.com/golang/go/issues/45955. Ideal would be if we can just
-// replace "stringslices" if the "slices" package becomes standard.
-package slices
-
-// Equal reports whether two slices are equal: the same length and all
-// elements equal. If the lengths are different, Equal returns false.
-// Otherwise, the elements are compared in index order, and the
-// comparison stops at the first unequal pair.
-func Equal(s1, s2 []string) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, n := range s1 {
- if n != s2[i] {
- return false
- }
- }
- return true
-}
-
-// Filter appends to d each element e of s for which keep(e) returns true.
-// It returns the modified d. d may be s[:0], in which case the kept
-// elements will be stored in the same slice.
-// if the slices overlap in some other way, the results are unspecified.
-// To create a new slice with the filtered results, pass nil for d.
-func Filter(d, s []string, keep func(string) bool) []string {
- for _, n := range s {
- if keep(n) {
- d = append(d, n)
- }
- }
- return d
-}
-
-// Contains reports whether v is present in s.
-func Contains(s []string, v string) bool {
- return Index(s, v) >= 0
-}
-
-// Index returns the index of the first occurrence of v in s, or -1 if
-// not present.
-func Index(s []string, v string) int {
- // "Contains" may be replaced with "Index(s, v) >= 0":
- // https://github.com/golang/go/issues/45955#issuecomment-873377947
- for i, n := range s {
- if n == v {
- return i
- }
- }
- return -1
-}
-
-// Functions below are not in https://github.com/golang/go/issues/45955
-
-// Clone returns a new clone of s.
-func Clone(s []string) []string {
- // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go
- if s == nil {
- return nil
- }
- c := make([]string, len(s))
- copy(c, s)
- return c
-}
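
The deleted k8s.io/utils/strings/slices helpers have direct counterparts in the standard library's slices package (Go 1.21+), which this module's go directive already satisfies. A sketch of the one-to-one replacements; the example values are illustrative, not call sites from this repository.

package main

import (
    "fmt"
    "slices"
)

func main() {
    conditions := []string{"KernelDeadlock", "ReadonlyFilesystem"}

    // Former utils helpers map directly onto the generic stdlib versions:
    fmt.Println(slices.Contains(conditions, "KernelDeadlock"))      // Contains
    fmt.Println(slices.Index(conditions, "ReadonlyFilesystem"))     // Index
    fmt.Println(slices.Equal(conditions, slices.Clone(conditions))) // Equal / Clone

    // Filter has no exact stdlib twin; slices.DeleteFunc (or a small loop)
    // covers the same ground with the predicate inverted.
    kept := slices.DeleteFunc(slices.Clone(conditions), func(s string) bool {
        return s == "ReadonlyFilesystem"
    })
    fmt.Println(kept)
}
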
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6ad66210b..391055242 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,5 +1,5 @@
-# cloud.google.com/go/auth v0.8.1
-## explicit; go 1.20
+# cloud.google.com/go/auth v0.15.0
+## explicit; go 1.23.0
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
cloud.google.com/go/auth/credentials/internal/externalaccount
@@ -10,14 +10,15 @@ cloud.google.com/go/auth/credentials/internal/stsexchange
cloud.google.com/go/auth/grpctransport
cloud.google.com/go/auth/httptransport
cloud.google.com/go/auth/internal
+cloud.google.com/go/auth/internal/compute
cloud.google.com/go/auth/internal/credsfile
cloud.google.com/go/auth/internal/jwt
cloud.google.com/go/auth/internal/transport
cloud.google.com/go/auth/internal/transport/cert
-# cloud.google.com/go/auth/oauth2adapt v0.2.3
-## explicit; go 1.20
+# cloud.google.com/go/auth/oauth2adapt v0.2.8
+## explicit; go 1.23.0
cloud.google.com/go/auth/oauth2adapt
-# cloud.google.com/go/compute/metadata v0.5.2
+# cloud.google.com/go/compute/metadata v0.6.0
## explicit; go 1.21
cloud.google.com/go/compute/metadata
# cloud.google.com/go/monitoring v1.20.3
@@ -95,7 +96,7 @@ github.com/beorn7/perks/quantile
github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1
github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1
github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/coreos/go-systemd/v22 v22.5.0
@@ -138,15 +139,15 @@ github.com/go-logr/stdr
## explicit; go 1.12
github.com/go-ole/go-ole
github.com/go-ole/go-ole/oleutil
-# github.com/go-openapi/jsonpointer v0.19.6
-## explicit; go 1.13
+# github.com/go-openapi/jsonpointer v0.21.0
+## explicit; go 1.20
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.22.4
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.23.0
+## explicit; go 1.20
github.com/go-openapi/swag
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
@@ -170,8 +171,8 @@ github.com/google/gnostic-models/extensions
github.com/google/gnostic-models/jsonschema
github.com/google/gnostic-models/openapiv2
github.com/google/gnostic-models/openapiv3
-# github.com/google/go-cmp v0.6.0
-## explicit; go 1.13
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
@@ -181,7 +182,7 @@ github.com/google/go-cmp/cmp/internal/value
## explicit; go 1.12
github.com/google/gofuzz
github.com/google/gofuzz/bytesource
-# github.com/google/s2a-go v0.1.8
+# github.com/google/s2a-go v0.1.9
## explicit; go 1.20
github.com/google/s2a-go
github.com/google/s2a-go/fallback
@@ -207,17 +208,19 @@ github.com/google/s2a-go/stream
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
-# github.com/googleapis/enterprise-certificate-proxy v0.3.2
-## explicit; go 1.19
+# github.com/googleapis/enterprise-certificate-proxy v0.3.6
+## explicit; go 1.23.0
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.13.0
-## explicit; go 1.20
+# github.com/googleapis/gax-go/v2 v2.14.1
+## explicit; go 1.21
github.com/googleapis/gax-go/v2
github.com/googleapis/gax-go/v2/apierror
github.com/googleapis/gax-go/v2/apierror/internal/proto
github.com/googleapis/gax-go/v2/callctx
github.com/googleapis/gax-go/v2/internal
+github.com/googleapis/gax-go/v2/internallog
+github.com/googleapis/gax-go/v2/internallog/internal
# github.com/hpcloud/tail v1.0.0
## explicit
github.com/hpcloud/tail
@@ -225,9 +228,6 @@ github.com/hpcloud/tail/ratelimiter
github.com/hpcloud/tail/util
github.com/hpcloud/tail/watch
github.com/hpcloud/tail/winfile
-# github.com/imdario/mergo v0.3.12
-## explicit; go 1.13
-github.com/imdario/mergo
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
@@ -237,6 +237,15 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
+# github.com/klauspost/compress v1.17.9
+## explicit; go 1.20
+github.com/klauspost/compress
+github.com/klauspost/compress/fse
+github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/snapref
+github.com/klauspost/compress/zstd
+github.com/klauspost/compress/zstd/internal/xxhash
# github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0
## explicit; go 1.16
github.com/lufia/plan9stats
@@ -254,26 +263,31 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
+# github.com/pkg/errors v0.9.1
+## explicit
+github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c
## explicit; go 1.14
github.com/power-devops/perfstat
-# github.com/prometheus/client_golang v1.19.1
+# github.com/prometheus/client_golang v1.20.4
## explicit; go 1.20
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
+# github.com/prometheus/common v0.63.0
+## explicit; go 1.21
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.0
+## explicit; go 1.21
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -327,7 +341,6 @@ go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricexport
go.opencensus.io/metric/metricproducer
-go.opencensus.io/plugin/ocgrpc
go.opencensus.io/plugin/ochttp
go.opencensus.io/plugin/ochttp/propagation/b3
go.opencensus.io/resource
@@ -340,16 +353,22 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0
-## explicit; go 1.20
+# go.opentelemetry.io/auto/sdk v1.1.0
+## explicit; go 1.22.0
+go.opentelemetry.io/auto/sdk
+go.opentelemetry.io/auto/sdk/internal/telemetry
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0
+## explicit; go 1.22.0
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
-## explicit; go 1.20
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0
+## explicit; go 1.22.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel v1.34.0
+## explicit; go 1.22.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/baggage
@@ -361,17 +380,19 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.20.0
-# go.opentelemetry.io/otel/metric v1.24.0
-## explicit; go 1.20
+go.opentelemetry.io/otel/semconv/v1.26.0
+# go.opentelemetry.io/otel/metric v1.34.0
+## explicit; go 1.22.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/trace v1.24.0
-## explicit; go 1.20
+# go.opentelemetry.io/otel/trace v1.34.0
+## explicit; go 1.22.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
-# golang.org/x/crypto v0.31.0
-## explicit; go 1.20
+go.opentelemetry.io/otel/trace/noop
+# golang.org/x/crypto v0.36.0
+## explicit; go 1.23.0
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/cryptobyte
@@ -379,16 +400,17 @@ golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
-# golang.org/x/net v0.33.0
-## explicit; go 1.18
+# golang.org/x/net v0.37.0
+## explicit; go 1.23.0
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.22.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.28.0
+## explicit; go 1.23.0
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
golang.org/x/oauth2/google
@@ -399,11 +421,11 @@ golang.org/x/oauth2/google/internal/stsexchange
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.10.0
-## explicit; go 1.18
+# golang.org/x/sync v0.12.0
+## explicit; go 1.23.0
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.28.0
-## explicit; go 1.18
+# golang.org/x/sys v0.32.0
+## explicit; go 1.23.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
@@ -412,20 +434,20 @@ golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc
golang.org/x/sys/windows/svc/debug
golang.org/x/sys/windows/svc/eventlog
-# golang.org/x/term v0.27.0
-## explicit; go 1.18
+# golang.org/x/term v0.30.0
+## explicit; go 1.23.0
golang.org/x/term
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+# golang.org/x/text v0.23.0
+## explicit; go 1.23.0
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.6.0
-## explicit; go 1.18
+# golang.org/x/time v0.11.0
+## explicit; go 1.23.0
golang.org/x/time/rate
-# google.golang.org/api v0.192.0
-## explicit; go 1.20
+# google.golang.org/api v0.228.0
+## explicit; go 1.23.0
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
google.golang.org/api/internal
@@ -438,35 +460,38 @@ google.golang.org/api/option/internaloption
google.golang.org/api/support/bundler
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
-google.golang.org/api/transport/http/internal/propagation
# google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf
## explicit; go 1.20
google.golang.org/genproto/googleapis/devtools/cloudtrace/v2
google.golang.org/genproto/googleapis/monitoring/v3
google.golang.org/genproto/googleapis/type/calendarperiod
-# google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422
+## explicit; go 1.22
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/distribution
google.golang.org/genproto/googleapis/api/label
google.golang.org/genproto/googleapis/api/metric
google.golang.org/genproto/googleapis/api/monitoredres
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4
+## explicit; go 1.23.0
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.64.1
-## explicit; go 1.19
+# google.golang.org/grpc v1.71.0
+## explicit; go 1.22.0
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/endpointsharding
google.golang.org/grpc/balancer/grpclb
google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -485,7 +510,9 @@ google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/credentials/oauth
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancer/gracefulswitch
@@ -497,24 +524,27 @@ google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/googlecloud
google.golang.org/grpc/internal/grpclog
-google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/idle
google.golang.org/grpc/internal/metadata
google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/proxyattributes
google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/delegatingresolver
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/internal/transport/networktype
google.golang.org/grpc/internal/xds
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
@@ -524,8 +554,8 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/protobuf v1.36.6
+## explicit; go 1.22
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
@@ -548,6 +578,7 @@ google.golang.org/protobuf/internal/genid
google.golang.org/protobuf/internal/impl
google.golang.org/protobuf/internal/order
google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
google.golang.org/protobuf/internal/set
google.golang.org/protobuf/internal/strs
google.golang.org/protobuf/internal/version
@@ -567,6 +598,9 @@ google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
+# gopkg.in/evanphx/json-patch.v4 v4.12.0
+## explicit
+gopkg.in/evanphx/json-patch.v4
# gopkg.in/fsnotify.v1 v1.4.7
## explicit
gopkg.in/fsnotify.v1
@@ -582,8 +616,8 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.31.7
-## explicit; go 1.22.0
+# k8s.io/api v0.32.3
+## explicit; go 1.23.0
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1alpha1
k8s.io/api/admissionregistration/v1beta1
@@ -608,7 +642,7 @@ k8s.io/api/certificates/v1
k8s.io/api/certificates/v1alpha1
k8s.io/api/certificates/v1beta1
k8s.io/api/coordination/v1
-k8s.io/api/coordination/v1alpha1
+k8s.io/api/coordination/v1alpha2
k8s.io/api/coordination/v1beta1
k8s.io/api/core/v1
k8s.io/api/discovery/v1
@@ -632,6 +666,7 @@ k8s.io/api/rbac/v1
k8s.io/api/rbac/v1alpha1
k8s.io/api/rbac/v1beta1
k8s.io/api/resource/v1alpha3
+k8s.io/api/resource/v1beta1
k8s.io/api/scheduling/v1
k8s.io/api/scheduling/v1alpha1
k8s.io/api/scheduling/v1beta1
@@ -639,11 +674,12 @@ k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apimachinery v0.31.7
-## explicit; go 1.22.0
+# k8s.io/apimachinery v0.32.3
+## explicit; go 1.23.0
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
+k8s.io/apimachinery/pkg/api/meta/testrestmapper
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/api/validation
k8s.io/apimachinery/pkg/apis/meta/internalversion
@@ -659,6 +695,7 @@ k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/runtime/serializer
+k8s.io/apimachinery/pkg/runtime/serializer/cbor
k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct
k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes
k8s.io/apimachinery/pkg/runtime/serializer/json
@@ -689,8 +726,8 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.31.7
-## explicit; go 1.22.0
+# k8s.io/client-go v0.32.3
+## explicit; go 1.23.0
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -708,7 +745,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1
k8s.io/client-go/applyconfigurations/certificates/v1alpha1
k8s.io/client-go/applyconfigurations/certificates/v1beta1
k8s.io/client-go/applyconfigurations/coordination/v1
-k8s.io/client-go/applyconfigurations/coordination/v1alpha1
+k8s.io/client-go/applyconfigurations/coordination/v1alpha2
k8s.io/client-go/applyconfigurations/coordination/v1beta1
k8s.io/client-go/applyconfigurations/core/v1
k8s.io/client-go/applyconfigurations/discovery/v1
@@ -734,6 +771,7 @@ k8s.io/client-go/applyconfigurations/rbac/v1
k8s.io/client-go/applyconfigurations/rbac/v1alpha1
k8s.io/client-go/applyconfigurations/rbac/v1beta1
k8s.io/client-go/applyconfigurations/resource/v1alpha3
+k8s.io/client-go/applyconfigurations/resource/v1beta1
k8s.io/client-go/applyconfigurations/scheduling/v1
k8s.io/client-go/applyconfigurations/scheduling/v1alpha1
k8s.io/client-go/applyconfigurations/scheduling/v1beta1
@@ -768,7 +806,7 @@ k8s.io/client-go/kubernetes/typed/certificates/v1
k8s.io/client-go/kubernetes/typed/certificates/v1alpha1
k8s.io/client-go/kubernetes/typed/certificates/v1beta1
k8s.io/client-go/kubernetes/typed/coordination/v1
-k8s.io/client-go/kubernetes/typed/coordination/v1alpha1
+k8s.io/client-go/kubernetes/typed/coordination/v1alpha2
k8s.io/client-go/kubernetes/typed/coordination/v1beta1
k8s.io/client-go/kubernetes/typed/core/v1
k8s.io/client-go/kubernetes/typed/discovery/v1
@@ -792,6 +830,7 @@ k8s.io/client-go/kubernetes/typed/rbac/v1
k8s.io/client-go/kubernetes/typed/rbac/v1alpha1
k8s.io/client-go/kubernetes/typed/rbac/v1beta1
k8s.io/client-go/kubernetes/typed/resource/v1alpha3
+k8s.io/client-go/kubernetes/typed/resource/v1beta1
k8s.io/client-go/kubernetes/typed/scheduling/v1
k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1
k8s.io/client-go/kubernetes/typed/scheduling/v1beta1
@@ -808,6 +847,7 @@ k8s.io/client-go/pkg/version
k8s.io/client-go/plugin/pkg/client/auth/exec
k8s.io/client-go/rest
k8s.io/client-go/rest/watch
+k8s.io/client-go/testing
k8s.io/client-go/tools/auth
k8s.io/client-go/tools/clientcmd
k8s.io/client-go/tools/clientcmd/api
@@ -819,6 +859,7 @@ k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
k8s.io/client-go/transport
+k8s.io/client-go/util/apply
k8s.io/client-go/util/cert
k8s.io/client-go/util/connrotation
k8s.io/client-go/util/consistencydetector
@@ -837,7 +878,7 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
-# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
+# k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f
## explicit; go 1.20
k8s.io/kube-openapi/pkg/cached
k8s.io/kube-openapi/pkg/common
@@ -848,20 +889,21 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/spec3
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+# k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
## explicit; go 1.18
k8s.io/utils/clock
k8s.io/utils/clock/testing
k8s.io/utils/inotify
+k8s.io/utils/internal/third_party/forked/golang/golang-lru
k8s.io/utils/internal/third_party/forked/golang/net
+k8s.io/utils/lru
k8s.io/utils/net
k8s.io/utils/ptr
-k8s.io/utils/strings/slices
-# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
-## explicit; go 1.18
+# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
+## explicit; go 1.21
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
-# sigs.k8s.io/structured-merge-diff/v4 v4.4.1
+# sigs.k8s.io/structured-merge-diff/v4 v4.4.2
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/fieldpath
sigs.k8s.io/structured-merge-diff/v4/merge
diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile
index 07b8bfa85..fb6cf040f 100644
--- a/vendor/sigs.k8s.io/json/Makefile
+++ b/vendor/sigs.k8s.io/json/Makefile
@@ -19,7 +19,7 @@ vet:
go vet sigs.k8s.io/json
@echo "checking for external dependencies"
- @deps=$$(go mod graph); \
+ @deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \
if [ -n "$${deps}" ]; then \
echo "only stdlib dependencies allowed, found:"; \
echo "$${deps}"; \
diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS
index 0fadafbdd..a08a434e6 100644
--- a/vendor/sigs.k8s.io/json/OWNERS
+++ b/vendor/sigs.k8s.io/json/OWNERS
@@ -2,5 +2,5 @@
approvers:
- deads2k
- - lavalamp
+ - jpbetz
- liggitt
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index 6a13cf2df..d538ac119 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -21,10 +21,10 @@ import (
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
-// Unmarshal returns an InvalidUnmarshalError.
+// Unmarshal returns an [InvalidUnmarshalError].
//
// Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
+// [Marshal] uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
@@ -33,28 +33,28 @@ import (
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
-// To unmarshal JSON into a value implementing the Unmarshaler interface,
-// Unmarshal calls that value's UnmarshalJSON method, including
+// To unmarshal JSON into a value implementing [Unmarshaler],
+// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
// when the input is a JSON null.
-// Otherwise, if the value implements encoding.TextUnmarshaler
-// and the input is a JSON quoted string, Unmarshal calls that value's
-// UnmarshalText method with the unquoted form of the string.
+// Otherwise, if the value implements [encoding.TextUnmarshaler]
+// and the input is a JSON quoted string, Unmarshal calls
+// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
-// keys to the keys used by Marshal (either the struct field name or its tag),
+// keys to the keys used by [Marshal] (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
-// ignored (see Decoder.DisallowUnknownFields for an alternative).
+// ignored (see [Decoder.DisallowUnknownFields] for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
-// bool, for JSON booleans
-// float64, for JSON numbers
-// string, for JSON strings
-// []interface{}, for JSON arrays
-// map[string]interface{}, for JSON objects
-// nil for JSON null
+// - bool, for JSON booleans
+// - float64, for JSON numbers
+// - string, for JSON strings
+// - []interface{}, for JSON arrays
+// - map[string]interface{}, for JSON objects
+// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.
@@ -72,16 +72,15 @@ import (
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
-// either be any string type, an integer, implement json.Unmarshaler, or
-// implement encoding.TextUnmarshaler.
+// either be any string type, an integer, or implement [encoding.TextUnmarshaler].
//
-// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
-// an UnmarshalTypeError describing the earliest such error. In any
+// an [UnmarshalTypeError] describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//
@@ -119,7 +118,7 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
-// By convention, to approximate the behavior of Unmarshal itself,
+// By convention, to approximate the behavior of [Unmarshal] itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
UnmarshalJSON([]byte) error
@@ -157,8 +156,8 @@ func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
+// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
+// (The argument to [Unmarshal] must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
@@ -573,17 +572,10 @@ func (d *decodeState) array(v reflect.Value) error {
break
}
- // Get element of array, growing if necessary.
+ // Expand slice length, growing the slice if necessary.
if v.Kind() == reflect.Slice {
- // Grow slice if necessary
if i >= v.Cap() {
- newcap := v.Cap() + v.Cap()/2
- if newcap < 4 {
- newcap = 4
- }
- newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
- reflect.Copy(newv, v)
- v.Set(newv)
+ v.Grow(1)
}
if i >= v.Len() {
v.SetLen(i + 1)
@@ -620,13 +612,11 @@ func (d *decodeState) array(v reflect.Value) error {
if i < v.Len() {
if v.Kind() == reflect.Array {
- // Array. Zero the rest.
- z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
- v.Index(i).Set(z)
+ v.Index(i).SetZero() // zero remainder of array
}
} else {
- v.SetLen(i)
+ v.SetLen(i) // truncate the slice
}
}
if i == 0 && v.Kind() == reflect.Slice {
@@ -636,7 +626,7 @@ func (d *decodeState) array(v reflect.Value) error {
}
var nullLiteral = []byte("null")
-var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()
// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.
@@ -776,7 +766,7 @@ func (d *decodeState) object(v reflect.Value) error {
if !mapElem.IsValid() {
mapElem = reflect.New(elemType).Elem()
} else {
- mapElem.Set(reflect.Zero(elemType))
+ mapElem.SetZero()
}
subv = mapElem
if checkDuplicateField != nil {
@@ -784,28 +774,14 @@ func (d *decodeState) object(v reflect.Value) error {
}
d.appendStrictFieldStackKey(string(key))
} else {
- var f *field
- if i, ok := fields.nameIndex[string(key)]; ok {
- // Found an exact name match.
- f = &fields.list[i]
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- } else if !d.caseSensitive {
- // Fall back to the expensive case-insensitive
- // linear search.
- for i := range fields.list {
- ff := &fields.list[i]
- if ff.equalFold(ff.nameBytes, key) {
- f = ff
- if checkDuplicateField != nil {
- checkDuplicateField(i, f.name)
- }
- break
- }
- }
+ f := fields.byExactName[string(key)]
+ if f == nil && !d.caseSensitive {
+ f = fields.byFoldedName[string(foldName(key))]
}
if f != nil {
+ if checkDuplicateField != nil {
+ checkDuplicateField(f.listIndex, f.name)
+ }
subv = v
destring = f.quoted
for _, i := range f.index {
@@ -874,33 +850,35 @@ func (d *decodeState) object(v reflect.Value) error {
if v.Kind() == reflect.Map {
kt := t.Key()
var kv reflect.Value
- switch {
- case reflect.PointerTo(kt).Implements(textUnmarshalerType):
+ if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
kv = reflect.New(kt)
if err := d.literalStore(item, kv, true); err != nil {
return err
}
kv = kv.Elem()
- case kt.Kind() == reflect.String:
- kv = reflect.ValueOf(key).Convert(kt)
- default:
+ } else {
switch kt.Kind() {
+ case reflect.String:
+ kv = reflect.New(kt).Elem()
+ kv.SetString(string(key))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := string(key)
n, err := strconv.ParseInt(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowInt(n) {
+ if err != nil || kt.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s := string(key)
n, err := strconv.ParseUint(s, 10, 64)
- if err != nil || reflect.Zero(kt).OverflowUint(n) {
+ if err != nil || kt.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
- kv = reflect.ValueOf(n).Convert(kt)
+ kv = reflect.New(kt).Elem()
+ kv.SetUint(n)
default:
panic("json: Unexpected key type") // should never occur
}
@@ -950,12 +928,12 @@ func (d *decodeState) convertNumber(s string) (any, error) {
f, err := strconv.ParseFloat(s, 64)
if err != nil {
- return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+ return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
}
return f, nil
}
-var numberType = reflect.TypeOf(Number(""))
+var numberType = reflect.TypeFor[Number]()
// literalStore decodes a literal stored in item into v.
//
@@ -965,7 +943,7 @@ var numberType = reflect.TypeOf(Number(""))
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
// Check for unmarshaler.
if len(item) == 0 {
- //Empty string given
+ // Empty string given.
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
return nil
}
@@ -1012,7 +990,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
switch v.Kind() {
case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
- v.Set(reflect.Zero(v.Type()))
+ v.SetZero()
// otherwise, ignore null for primitives/string
}
case 't', 'f': // true, false
@@ -1064,10 +1042,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
v.SetBytes(b[:n])
case reflect.String:
- if v.Type() == numberType && !isValidNumber(string(s)) {
+ t := string(s)
+ if v.Type() == numberType && !isValidNumber(t) {
return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
}
- v.SetString(string(s))
+ v.SetString(t)
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(string(s)))
@@ -1083,13 +1062,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
panic(phasePanicMsg)
}
- s := string(item)
switch v.Kind() {
default:
if v.Kind() == reflect.String && v.Type() == numberType {
// s must be a valid number, because it's
// already been tokenized.
- v.SetString(s)
+ v.SetString(string(item))
break
}
if fromQuoted {
@@ -1097,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
case reflect.Interface:
- n, err := d.convertNumber(s)
+ n, err := d.convertNumber(string(item))
if err != nil {
d.saveError(err)
break
@@ -1109,25 +1087,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
v.Set(reflect.ValueOf(n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n, err := strconv.ParseInt(s, 10, 64)
+ n, err := strconv.ParseInt(string(item), 10, 64)
if err != nil || v.OverflowInt(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- n, err := strconv.ParseUint(s, 10, 64)
+ n, err := strconv.ParseUint(string(item), 10, 64)
if err != nil || v.OverflowUint(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetUint(n)
case reflect.Float32, reflect.Float64:
- n, err := strconv.ParseFloat(s, v.Type().Bits())
+ n, err := strconv.ParseFloat(string(item), v.Type().Bits())
if err != nil || v.OverflowFloat(n) {
- d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetFloat(n)
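
The reworked Unmarshal documentation above enumerates what an untyped interface{} target receives for each JSON kind. A quick illustration using the upstream encoding/json, whose behavior this forked copy mirrors apart from the added strict-decoding options:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    var v interface{}
    data := []byte(`{"name":"npd","replicas":3,"ready":true,"taint":null,"ports":[80,443]}`)
    if err := json.Unmarshal(data, &v); err != nil {
        panic(err)
    }

    // Objects decode to map[string]interface{}, numbers to float64, arrays to
    // []interface{}, booleans to bool, strings to string, and null to nil.
    for key, val := range v.(map[string]interface{}) {
        fmt.Printf("%s: %v (%T)\n", key, val, val)
    }
}
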
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
index 5b67251fb..eb73bff58 100644
--- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -12,12 +12,13 @@ package json
import (
"bytes"
+ "cmp"
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
- "sort"
+ "slices"
"strconv"
"strings"
"sync"
@@ -28,29 +29,30 @@ import (
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method and encodes the result as a JSON string.
+// If an encountered value implements [Marshaler]
+// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
+// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
+// value implements [encoding.TextMarshaler] instead, Marshal calls
+// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
+// [Unmarshaler.UnmarshalJSON].
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
-// Floating point, integer, and Number values encode as JSON numbers.
+// Floating point, integer, and [Number] values encode as JSON numbers.
+// NaN and +/-Inf values will return an [UnsupportedValueError].
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// So that the JSON will be safe to embed inside HTML